tracing: Add initial opentracing support

Add initial support for opentracing by using the `jaeger` package.
Since opentracing uses the `context` package, add a `context.Context`
as the first parameter to all the functions that we might want to
trace. Trace "spans" (trace points) are then added by extracting the
trace details from the specified context parameter.
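
For illustration, this is the instrumentation pattern the change applies to
each traced function (the `doWork`/`doMoreWork` names below are hypothetical,
not part of the diff): start a child span from the incoming context, tag it,
finish it on return, and pass the updated context on to callees:

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// doWork follows the same pattern as create(), delete(), kill() etc. in
// this commit: it takes a context as its first parameter and derives a
// span from it.
func doWork(ctx context.Context, containerID string) error {
	// Create a span as a child of whatever span is already stored in ctx
	// (or a new root span if there is none).
	span, ctx := opentracing.StartSpanFromContext(ctx, "doWork")
	defer span.Finish()

	span.SetTag("container", containerID)

	// Pass the updated context down so callees create child spans of this one.
	return doMoreWork(ctx)
}

func doMoreWork(ctx context.Context) error {
	span, _ := opentracing.StartSpanFromContext(ctx, "doMoreWork")
	defer span.Finish()

	// ... real work would go here ...
	return nil
}

func main() {
	_ = doWork(context.Background(), "some-container")
}
```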

Notes:

- Although the tracer is created in `main()`, the "root span"
  (aka the first trace point) is not added until `beforeSubcommands()`.

  This is by design and is a compromise: by delaying the creation of the
  root span, the spans become much more readable. Using the web-based
  Jaeger UI, you will see traces like this:

  ```
  kata-runtime: kata-runtime create
  ------------  -------------------
       ^                ^
       |                |
  Trace name        First span name
                    (which clearly shows the CLI command that was run)
  ```

  Creating the span earlier would mean having to expand 'n' spans in the
  UI before seeing the name of the CLI command that was run. While adding
  this support, that became very tedious, hence the design decision to
  defer creation of the root span until after signal handling has been
  set up and the CLI options have been parsed, but still very early in
  the code path.

- At this stage, the tracing stops at the `virtcontainers` call
  boundary.

- Tracing is "always on" as there doesn't appear to be a way to toggle
  it. However, it resolves to a "nop" unless the tracer can talk to a
  Jaeger agent (see the sketch below).
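
The tracer itself is created by `createTracer()` (called from
`setupTracing()` in the diff below), whose body is not part of the hunks
shown here. As a rough idea only, a minimal Jaeger-based tracer could be set
up along the following lines; the sampler and reporter settings, the
global-tracer registration and the closer handling are illustrative
assumptions, not necessarily what the runtime does:

```go
package main

import (
	"io"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// Kept only so the sketch compiles: the runtime would need to Close() this
// (e.g. from stopTracing()) to flush any buffered spans.
var tracerCloser io.Closer

// createTracer is a hypothetical sketch, not the implementation added by
// this commit. It samples every span, but if no Jaeger agent is listening
// on the default UDP endpoint (localhost:6831) the reporter silently drops
// them - the "nop" behaviour described in the notes above.
func createTracer(name string) (opentracing.Tracer, error) {
	cfg := jaegercfg.Configuration{
		Sampler: &jaegercfg.SamplerConfig{
			Type:  jaeger.SamplerTypeConst,
			Param: 1,
		},
		Reporter: &jaegercfg.ReporterConfig{
			LogSpans: false,
		},
	}

	tracer, closer, err := cfg.New(name)
	if err != nil {
		return nil, err
	}
	tracerCloser = closer

	// Register globally so opentracing.StartSpanFromContext() uses it.
	opentracing.SetGlobalTracer(tracer)

	return tracer, nil
}

func main() {
	if _, err := createTracer("kata-runtime"); err != nil {
		panic(err)
	}
}
```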

Note that this commit required a little rework of `beforeSubcommands()`
to reduce its cyclomatic complexity.

Fixes #557.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
commit 3a1bbd0271 (parent 0ede467256)
James O. D. Hunt, 2018-08-09 15:07:32 +01:00
138 changed files with 20465 additions and 154 deletions

Gopkg.lock (generated)

@ -19,6 +19,14 @@
pruneopts = "NUT"
revision = "1d2a6a3ea132a86abd0731408b7dc34f2fc17d55"
[[projects]]
branch = "master"
digest = "1:8ecb89af7dfe3ac401bdb0c9390b134ef96a97e85f732d2b0604fb7b3977839f"
name = "github.com/codahale/hdrhistogram"
packages = ["."]
pruneopts = "NUT"
revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
[[projects]]
digest = "1:3d1a50e9f27c661df8c5552e7f2f6b9d2a8b641c65aeac7373f8a5c60d9f6856"
name = "github.com/containerd/cri-containerd"
@ -170,6 +178,26 @@
pruneopts = "NUT"
revision = "4e3b9264a330d094b0386c3703c5f379119711e8"
[[projects]]
digest = "1:7da29c22bcc5c2ffb308324377dc00b5084650348c2799e573ed226d8cc9faf0"
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log",
]
pruneopts = "NUT"
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "NUT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
@ -210,6 +238,39 @@
pruneopts = "NUT"
revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"
[[projects]]
digest = "1:3d48c38e0eca8c66df62379c5ae7a83fb5cd839b94f241354c07ba077da7bc45"
name = "github.com/uber/jaeger-client-go"
packages = [
".",
"config",
"internal/baggage",
"internal/baggage/remote",
"internal/spanlog",
"internal/throttler",
"internal/throttler/remote",
"log",
"rpcmetrics",
"thrift",
"thrift-gen/agent",
"thrift-gen/baggage",
"thrift-gen/jaeger",
"thrift-gen/sampling",
"thrift-gen/zipkincore",
"utils",
]
pruneopts = "NUT"
revision = "b043381d944715b469fd6b37addfd30145ca1758"
version = "v2.14.0"
[[projects]]
digest = "1:0f09db8429e19d57c8346ad76fbbc679341fa86073d3b8fb5ac919f0357d8f4c"
name = "github.com/uber/jaeger-lib"
packages = ["metrics"]
pruneopts = "NUT"
revision = "ed3a127ec5fef7ae9ea95b01b542c47fbd999ce5"
version = "v1.5.0"
[[projects]]
digest = "1:137331c8068398ffaf600c283e721389a6981d3b27ee84a8510e8c77505a464e"
name = "github.com/urfave/cli"
@ -354,10 +415,12 @@
"github.com/opencontainers/runc/libcontainer/specconv",
"github.com/opencontainers/runc/libcontainer/utils",
"github.com/opencontainers/runtime-spec/specs-go",
"github.com/opentracing/opentracing-go",
"github.com/safchain/ethtool",
"github.com/sirupsen/logrus",
"github.com/sirupsen/logrus/hooks/syslog",
"github.com/stretchr/testify/assert",
"github.com/uber/jaeger-client-go/config",
"github.com/urfave/cli",
"github.com/vishvananda/netlink",
"github.com/vishvananda/netns",


@ -102,6 +102,7 @@ type proxy struct {
type runtime struct {
Debug bool `toml:"enable_debug"`
Tracing bool `toml:"enable_tracing"`
InterNetworkModel string `toml:"internetworking_model"`
}
@ -538,6 +539,10 @@ func loadConfiguration(configPath string, ignoreLogging bool) (resolvedConfigPat
kataLog.Logger.Level = originalLoggerLevel
}
if tomlConf.Runtime.Tracing {
tracing = true
}
if tomlConf.Runtime.InterNetworkModel != "" {
err = config.InterNetworkModel.SetModel(tomlConf.Runtime.InterNetworkModel)
if err != nil {


@ -189,3 +189,8 @@ path = "@SHIMPATH@"
# Used when the Container network interface can be bridged using
# macvtap.
internetworking_model="@DEFNETWORKMODEL@"
# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true


@ -7,6 +7,7 @@
package main
import (
"context"
"errors"
"fmt"
"io/ioutil"
@ -17,6 +18,7 @@ import (
vc "github.com/kata-containers/runtime/virtcontainers"
vf "github.com/kata-containers/runtime/virtcontainers/factory"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -62,6 +64,11 @@ var createCLICommand = cli.Command{
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
if !ok {
return errors.New("invalid runtime config")
@ -72,7 +79,7 @@ var createCLICommand = cli.Command{
return err
}
return create(context.Args().First(),
return create(ctx, context.Args().First(),
context.String("bundle"),
console,
context.String("pid-file"),
@ -85,12 +92,16 @@ var createCLICommand = cli.Command{
// Use a variable to allow tests to modify its value
var getKernelParamsFunc = getKernelParams
func create(containerID, bundlePath, console, pidFilePath string, detach bool,
func create(ctx context.Context, containerID, bundlePath, console, pidFilePath string, detach bool,
runtimeConfig oci.RuntimeConfig) error {
var err error
span, ctx := opentracing.StartSpanFromContext(ctx, "create")
defer span.Finish()
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
if bundlePath, err = validCreateParams(containerID, bundlePath); err != nil {
@ -136,12 +147,12 @@ func create(containerID, bundlePath, console, pidFilePath string, detach bool,
var process vc.Process
switch containerType {
case vc.PodSandbox:
process, err = createSandbox(ociSpec, runtimeConfig, containerID, bundlePath, console, disableOutput)
process, err = createSandbox(ctx, ociSpec, runtimeConfig, containerID, bundlePath, console, disableOutput)
if err != nil {
return err
}
case vc.PodContainer:
process, err = createContainer(ociSpec, containerID, bundlePath, console, disableOutput)
process, err = createContainer(ctx, ociSpec, containerID, bundlePath, console, disableOutput)
if err != nil {
return err
}
@ -152,7 +163,7 @@ func create(containerID, bundlePath, console, pidFilePath string, detach bool,
// is shim's in our case. This is mandatory to make sure there is no one
// else (like Docker) trying to create those files on our behalf. We want to
// know those files location so that we can remove them when delete is called.
cgroupsPathList, err := processCgroupsPath(ociSpec, containerType.IsSandbox())
cgroupsPathList, err := processCgroupsPath(ctx, ociSpec, containerType.IsSandbox())
if err != nil {
return err
}
@ -163,14 +174,14 @@ func create(containerID, bundlePath, console, pidFilePath string, detach bool,
cgroupsDirPath = ociSpec.Linux.CgroupsPath
}
if err := createCgroupsFiles(containerID, cgroupsDirPath, cgroupsPathList, process.Pid); err != nil {
if err := createCgroupsFiles(ctx, containerID, cgroupsDirPath, cgroupsPathList, process.Pid); err != nil {
return err
}
// Creation of PID file has to be the last thing done in the create
// because containerd considers the create complete after this file
// is created.
return createPIDFile(pidFilePath, process.Pid)
return createPIDFile(ctx, pidFilePath, process.Pid)
}
var systemdKernelParam = []vc.Param{
@ -241,8 +252,10 @@ func setKernelParams(containerID string, runtimeConfig *oci.RuntimeConfig) error
return nil
}
func createSandbox(ociSpec oci.CompatOCISpec, runtimeConfig oci.RuntimeConfig,
func createSandbox(ctx context.Context, ociSpec oci.CompatOCISpec, runtimeConfig oci.RuntimeConfig,
containerID, bundlePath, console string, disableOutput bool) (vc.Process, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "createSandbox")
defer span.Finish()
err := setKernelParams(containerID, &runtimeConfig)
if err != nil {
@ -259,15 +272,17 @@ func createSandbox(ociSpec oci.CompatOCISpec, runtimeConfig oci.RuntimeConfig,
return vc.Process{}, err
}
kataLog = kataLog.WithField("sandbox", sandbox.ID())
sid := sandbox.ID()
kataLog = kataLog.WithField("sandbox", sid)
setExternalLoggers(kataLog)
span.SetTag("sandbox", sid)
containers := sandbox.GetAllContainers()
if len(containers) != 1 {
return vc.Process{}, fmt.Errorf("BUG: Container list from sandbox is wrong, expecting only one container, found %d containers", len(containers))
}
if err := addContainerIDMapping(containerID, sandbox.ID()); err != nil {
if err := addContainerIDMapping(ctx, containerID, sandbox.ID()); err != nil {
return vc.Process{}, err
}
@ -288,9 +303,12 @@ func setEphemeralStorageType(ociSpec oci.CompatOCISpec) oci.CompatOCISpec {
return ociSpec
}
func createContainer(ociSpec oci.CompatOCISpec, containerID, bundlePath,
func createContainer(ctx context.Context, ociSpec oci.CompatOCISpec, containerID, bundlePath,
console string, disableOutput bool) (vc.Process, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "createContainer")
defer span.Finish()
ociSpec = setEphemeralStorageType(ociSpec)
contConfig, err := oci.ContainerConfig(ociSpec, bundlePath, containerID, console, disableOutput)
@ -305,20 +323,24 @@ func createContainer(ociSpec oci.CompatOCISpec, containerID, bundlePath,
kataLog = kataLog.WithField("sandbox", sandboxID)
setExternalLoggers(kataLog)
span.SetTag("sandbox", sandboxID)
_, c, err := vci.CreateContainer(sandboxID, contConfig)
if err != nil {
return vc.Process{}, err
}
if err := addContainerIDMapping(containerID, sandboxID); err != nil {
if err := addContainerIDMapping(ctx, containerID, sandboxID); err != nil {
return vc.Process{}, err
}
return c.Process(), nil
}
func createCgroupsFiles(containerID string, cgroupsDirPath string, cgroupsPathList []string, pid int) error {
func createCgroupsFiles(ctx context.Context, containerID string, cgroupsDirPath string, cgroupsPathList []string, pid int) error {
span, _ := opentracing.StartSpanFromContext(ctx, "createCgroupsFiles")
defer span.Finish()
if len(cgroupsPathList) == 0 {
kataLog.WithField("pid", pid).Info("Cgroups files not created because cgroupsPath was empty")
return nil
@ -361,7 +383,10 @@ func createCgroupsFiles(containerID string, cgroupsDirPath string, cgroupsPathLi
return nil
}
func createPIDFile(pidFilePath string, pid int) error {
func createPIDFile(ctx context.Context, pidFilePath string, pid int) error {
span, _ := opentracing.StartSpanFromContext(ctx, "createPIDFile")
defer span.Finish()
if pidFilePath == "" {
// runtime should not fail since pid file is optional
return nil


@ -6,6 +6,7 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
@ -45,7 +46,7 @@ func mockCPUSetContent(contents map[string]string) error {
}
func testCreateCgroupsFilesSuccessful(t *testing.T, cgroupsDirPath string, cgroupsPathList []string, pid int) {
if err := createCgroupsFiles("foo", cgroupsDirPath, cgroupsPathList, pid); err != nil {
if err := createCgroupsFiles(context.Background(), "foo", cgroupsDirPath, cgroupsPathList, pid); err != nil {
t.Fatalf("This test should succeed (cgroupsPath %q, pid %d): %s", cgroupsPathList, pid, err)
}
}
@ -95,7 +96,7 @@ func TestCreateCgroupsFilesFailToWriteFile(t *testing.T) {
files := []string{file}
err = createCgroupsFiles("foo", "cgroups-file", files, testPID)
err = createCgroupsFiles(context.Background(), "foo", "cgroups-file", files, testPID)
assert.Error(err)
}
@ -135,7 +136,7 @@ func TestCreatePIDFileSuccessful(t *testing.T) {
}
pidFilePath := filepath.Join(pidDirPath, "pid-file-path")
if err := createPIDFile(pidFilePath, testPID); err != nil {
if err := createPIDFile(context.Background(), pidFilePath, testPID); err != nil {
t.Fatal(err)
}
@ -155,7 +156,7 @@ func TestCreatePIDFileSuccessful(t *testing.T) {
func TestCreatePIDFileEmptyPathSuccessful(t *testing.T) {
file := ""
if err := createPIDFile(file, testPID); err != nil {
if err := createPIDFile(context.Background(), file, testPID); err != nil {
t.Fatalf("This test should not fail (pidFilePath %q, pid %d)", file, testPID)
}
}
@ -179,7 +180,7 @@ func TestCreatePIDFileUnableToRemove(t *testing.T) {
err = os.MkdirAll(subdir, os.FileMode(0000))
assert.NoError(err)
err = createPIDFile(file, testPID)
err = createPIDFile(context.Background(), file, testPID)
assert.Error(err)
// let it be deleted
@ -196,7 +197,7 @@ func TestCreatePIDFileUnableToCreate(t *testing.T) {
subdir := filepath.Join(tmpdir, "dir")
file := filepath.Join(subdir, "pidfile")
err = createPIDFile(file, testPID)
err = createPIDFile(context.Background(), file, testPID)
// subdir doesn't exist
assert.Error(err)
@ -337,7 +338,7 @@ func TestCreateInvalidArgs(t *testing.T) {
}
for i, d := range data {
err := create(d.containerID, d.bundlePath, d.console, d.pidFilePath, d.detach, d.runtimeConfig)
err := create(context.Background(), d.containerID, d.bundlePath, d.console, d.pidFilePath, d.detach, d.runtimeConfig)
assert.Errorf(err, "test %d (%+v)", i, d)
}
}
@ -376,7 +377,7 @@ func TestCreateInvalidConfigJSON(t *testing.T) {
f.Close()
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -420,7 +421,7 @@ func TestCreateInvalidContainerType(t *testing.T) {
assert.NoError(err)
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -465,7 +466,7 @@ func TestCreateContainerInvalid(t *testing.T) {
assert.NoError(err)
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -556,7 +557,7 @@ func TestCreateProcessCgroupsPathSuccessful(t *testing.T) {
assert.NoError(err)
for _, detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, detach, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, detach, runtimeConfig)
assert.NoError(err, "detached: %+v", detach)
os.RemoveAll(path)
}
@ -640,7 +641,7 @@ func TestCreateCreateCgroupsFilesFail(t *testing.T) {
assert.NoError(err)
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -716,7 +717,7 @@ func TestCreateCreateCreatePidFileFail(t *testing.T) {
assert.NoError(err)
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -782,7 +783,7 @@ func TestCreate(t *testing.T) {
assert.NoError(err)
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.NoError(err, "%+v", detach)
os.RemoveAll(path)
}
@ -839,7 +840,7 @@ func TestCreateInvalidKernelParams(t *testing.T) {
}
for detach := range []bool{true, false} {
err := create(testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
err := create(context.Background(), testContainerID, bundlePath, testConsole, pidFilePath, true, runtimeConfig)
assert.Errorf(err, "%+v", detach)
assert.False(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -884,7 +885,7 @@ func TestCreateSandboxConfigFail(t *testing.T) {
Quota: &quota,
}
_, err = createSandbox(spec, runtimeConfig, testContainerID, bundlePath, testConsole, true)
_, err = createSandbox(context.Background(), spec, runtimeConfig, testContainerID, bundlePath, testConsole, true)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -915,7 +916,7 @@ func TestCreateCreateSandboxFail(t *testing.T) {
spec, err := readOCIConfigFile(ociConfigFile)
assert.NoError(err)
_, err = createSandbox(spec, runtimeConfig, testContainerID, bundlePath, testConsole, true)
_, err = createSandbox(context.Background(), spec, runtimeConfig, testContainerID, bundlePath, testConsole, true)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
}
@ -953,7 +954,7 @@ func TestCreateCreateContainerContainerConfigFail(t *testing.T) {
assert.NoError(err)
for _, disableOutput := range []bool{true, false} {
_, err = createContainer(spec, testContainerID, bundlePath, testConsole, disableOutput)
_, err = createContainer(context.Background(), spec, testContainerID, bundlePath, testConsole, disableOutput)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
assert.True(strings.Contains(err.Error(), containerType))
@ -994,7 +995,7 @@ func TestCreateCreateContainerFail(t *testing.T) {
assert.NoError(err)
for _, disableOutput := range []bool{true, false} {
_, err = createContainer(spec, testContainerID, bundlePath, testConsole, disableOutput)
_, err = createContainer(context.Background(), spec, testContainerID, bundlePath, testConsole, disableOutput)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
os.RemoveAll(path)
@ -1060,7 +1061,7 @@ func TestCreateCreateContainer(t *testing.T) {
assert.NoError(err)
for _, disableOutput := range []bool{true, false} {
_, err = createContainer(spec, testContainerID, bundlePath, testConsole, disableOutput)
_, err = createContainer(context.Background(), spec, testContainerID, bundlePath, testConsole, disableOutput)
assert.NoError(err)
os.RemoveAll(path)
}


@ -7,11 +7,13 @@
package main
import (
"context"
"fmt"
"os"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -36,6 +38,11 @@ EXAMPLE:
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
args := context.Args()
if args.Present() == false {
return fmt.Errorf("Missing container ID, should at least provide one")
@ -43,7 +50,7 @@ EXAMPLE:
force := context.Bool("force")
for _, cID := range []string(args) {
if err := delete(cID, force); err != nil {
if err := delete(ctx, cID, force); err != nil {
return err
}
}
@ -52,9 +59,13 @@ EXAMPLE:
},
}
func delete(containerID string, force bool) error {
func delete(ctx context.Context, containerID string, force bool) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "delete")
defer span.Finish()
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
status, sandboxID, err := getExistingContainerInfo(containerID)
@ -71,6 +82,9 @@ func delete(containerID string, force bool) error {
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
containerType, err := oci.GetContainerType(status.Annotations)
if err != nil {
return err
@ -93,11 +107,11 @@ func delete(containerID string, force bool) error {
switch containerType {
case vc.PodSandbox:
if err := deleteSandbox(sandboxID); err != nil {
if err := deleteSandbox(ctx, sandboxID); err != nil {
return err
}
case vc.PodContainer:
if err := deleteContainer(sandboxID, containerID, forceStop); err != nil {
if err := deleteContainer(ctx, sandboxID, containerID, forceStop); err != nil {
return err
}
default:
@ -107,19 +121,22 @@ func delete(containerID string, force bool) error {
// In order to prevent any file descriptor leak related to cgroups files
// that have been previously created, we have to remove them before this
// function returns.
cgroupsPathList, err := processCgroupsPath(ociSpec, containerType.IsSandbox())
cgroupsPathList, err := processCgroupsPath(ctx, ociSpec, containerType.IsSandbox())
if err != nil {
return err
}
if err := delContainerIDMapping(containerID); err != nil {
if err := delContainerIDMapping(ctx, containerID); err != nil {
return err
}
return removeCgroupsPath(containerID, cgroupsPathList)
return removeCgroupsPath(ctx, containerID, cgroupsPathList)
}
func deleteSandbox(sandboxID string) error {
func deleteSandbox(ctx context.Context, sandboxID string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "deleteSandbox")
defer span.Finish()
status, err := vci.StatusSandbox(sandboxID)
if err != nil {
return err
@ -138,7 +155,10 @@ func deleteSandbox(sandboxID string) error {
return nil
}
func deleteContainer(sandboxID, containerID string, forceStop bool) error {
func deleteContainer(ctx context.Context, sandboxID, containerID string, forceStop bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "deleteContainer")
defer span.Finish()
if forceStop {
if _, err := vci.StopContainer(sandboxID, containerID); err != nil {
return err
@ -152,7 +172,10 @@ func deleteContainer(sandboxID, containerID string, forceStop bool) error {
return nil
}
func removeCgroupsPath(containerID string, cgroupsPathList []string) error {
func removeCgroupsPath(ctx context.Context, containerID string, cgroupsPathList []string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "removeCgroupsPath")
defer span.Finish()
if len(cgroupsPathList) == 0 {
kataLog.WithField("container", containerID).Info("Cgroups files not removed because cgroupsPath was empty")
return nil


@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"io/ioutil"
"os"
@ -20,7 +21,7 @@ import (
)
func testRemoveCgroupsPathSuccessful(t *testing.T, cgroupsPathList []string) {
if err := removeCgroupsPath("foo", cgroupsPathList); err != nil {
if err := removeCgroupsPath(context.Background(), "foo", cgroupsPathList); err != nil {
t.Fatalf("This test should succeed (cgroupsPathList = %v): %s", cgroupsPathList, err)
}
}
@ -50,7 +51,7 @@ func TestDeleteInvalidContainer(t *testing.T) {
assert := assert.New(t)
// Missing container id
err := delete("", false)
err := delete(context.Background(), "", false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -60,7 +61,7 @@ func TestDeleteInvalidContainer(t *testing.T) {
ctrsMapTreePath = path
// Container missing in ListSandbox
err = delete(testContainerID, false)
err = delete(context.Background(), testContainerID, false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -87,7 +88,7 @@ func TestDeleteMissingContainerTypeAnnotation(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -116,7 +117,7 @@ func TestDeleteInvalidConfig(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -172,7 +173,7 @@ func TestDeleteSandbox(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -189,7 +190,7 @@ func TestDeleteSandbox(t *testing.T) {
testingImpl.StatusSandboxFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -201,7 +202,7 @@ func TestDeleteSandbox(t *testing.T) {
testingImpl.StopSandboxFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -213,7 +214,7 @@ func TestDeleteSandbox(t *testing.T) {
testingImpl.DeleteSandboxFunc = nil
}()
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Nil(err)
}
@ -251,7 +252,7 @@ func TestDeleteInvalidContainerType(t *testing.T) {
}()
// Delete an invalid container type
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -290,7 +291,7 @@ func TestDeleteSandboxRunning(t *testing.T) {
}()
// Delete on a running sandbox should fail
err = delete(sandbox.ID(), false)
err = delete(context.Background(), sandbox.ID(), false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -313,7 +314,7 @@ func TestDeleteSandboxRunning(t *testing.T) {
}()
// Force delete a running sandbox
err = delete(sandbox.ID(), true)
err = delete(context.Background(), sandbox.ID(), true)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -325,7 +326,7 @@ func TestDeleteSandboxRunning(t *testing.T) {
testingImpl.DeleteSandboxFunc = nil
}()
err = delete(sandbox.ID(), true)
err = delete(context.Background(), sandbox.ID(), true)
assert.Nil(err)
}
@ -370,7 +371,7 @@ func TestDeleteRunningContainer(t *testing.T) {
}()
// Delete on a running container should fail.
err = delete(sandbox.MockContainers[0].ID(), false)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), false)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -379,7 +380,7 @@ func TestDeleteRunningContainer(t *testing.T) {
defer os.RemoveAll(path)
// force delete
err = delete(sandbox.MockContainers[0].ID(), true)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), true)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -392,7 +393,7 @@ func TestDeleteRunningContainer(t *testing.T) {
assert.NoError(err)
defer os.RemoveAll(path)
err = delete(sandbox.MockContainers[0].ID(), true)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), true)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -408,7 +409,7 @@ func TestDeleteRunningContainer(t *testing.T) {
assert.NoError(err)
defer os.RemoveAll(path)
err = delete(sandbox.MockContainers[0].ID(), true)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), true)
assert.Nil(err)
}
@ -452,7 +453,7 @@ func TestDeleteContainer(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = delete(sandbox.MockContainers[0].ID(), false)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), false)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -465,7 +466,7 @@ func TestDeleteContainer(t *testing.T) {
testingImpl.StopContainerFunc = nil
}()
err = delete(sandbox.MockContainers[0].ID(), false)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), false)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -481,7 +482,7 @@ func TestDeleteContainer(t *testing.T) {
assert.NoError(err)
defer os.RemoveAll(path)
err = delete(sandbox.MockContainers[0].ID(), false)
err = delete(context.Background(), sandbox.MockContainers[0].ID(), false)
assert.Nil(err)
}
@ -613,6 +614,6 @@ func TestRemoveCGroupsPath(t *testing.T) {
_ = os.Chmod(tmpdir, 0755)
}()
err = removeCgroupsPath("foo", []string{dir})
err = removeCgroupsPath(context.Background(), "foo", []string{dir})
assert.Error(err)
}


@ -14,6 +14,7 @@ import (
"time"
vc "github.com/kata-containers/runtime/virtcontainers"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@ -136,6 +137,14 @@ information is displayed once every 5 seconds.`,
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "events")
defer span.Finish()
containerID := context.Args().First()
if containerID == "" {
return fmt.Errorf("container id cannot be empty")
@ -143,6 +152,7 @@ information is displayed once every 5 seconds.`,
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
duration := context.Duration("interval")
if duration <= 0 {
@ -162,6 +172,8 @@ information is displayed once every 5 seconds.`,
})
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
if status.State.State == vc.StateStopped {
return fmt.Errorf("container with id %s is not running", status.ID)


@ -7,6 +7,7 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -16,6 +17,7 @@ import (
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -107,7 +109,12 @@ EXAMPLE:
},
},
Action: func(context *cli.Context) error {
return execute(context)
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
return execute(ctx, context)
},
}
@ -181,11 +188,15 @@ func generateExecParams(context *cli.Context, specProcess *oci.CompatOCIProcess)
return params, nil
}
func execute(context *cli.Context) error {
func execute(ctx context.Context, context *cli.Context) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "execute")
defer span.Finish()
containerID := context.Args().First()
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
status, sandboxID, err := getExistingContainerInfo(containerID)
if err != nil {
@ -194,6 +205,7 @@ func execute(context *cli.Context) error {
kataLog = kataLog.WithField("sandbox", sandboxID)
setExternalLoggers(kataLog)
span.SetTag("sandbox", sandboxID)
// Retrieve OCI spec configuration.
ociSpec, err := oci.GetOCIConfig(status)
@ -211,6 +223,7 @@ func execute(context *cli.Context) error {
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// container MUST be ready or running.
if status.State.State != vc.StateReady &&
@ -253,7 +266,7 @@ func execute(context *cli.Context) error {
// Creation of PID file has to be the last thing done in the exec
// because containerd considers the exec to have finished starting
// after this file is created.
if err := createPIDFile(params.pidFile, process.Pid); err != nil {
if err := createPIDFile(ctx, params.pidFile, process.Pid); err != nil {
return err
}


@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"io/ioutil"
"os"
@ -56,7 +57,7 @@ func TestExecuteErrors(t *testing.T) {
ctx := createCLIContext(flagSet)
// missing container id
err := execute(ctx)
err := execute(context.Background(), ctx)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -66,7 +67,7 @@ func TestExecuteErrors(t *testing.T) {
// StatusSandbox error
flagSet.Parse([]string{testContainerID})
err = execute(ctx)
err = execute(context.Background(), ctx)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -83,7 +84,7 @@ func TestExecuteErrors(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = execute(ctx)
err = execute(context.Background(), ctx)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -103,7 +104,7 @@ func TestExecuteErrors(t *testing.T) {
return newSingleContainerStatus(testContainerID, containerState, annotations), nil
}
err = execute(ctx)
err = execute(context.Background(), ctx)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -115,7 +116,7 @@ func TestExecuteErrors(t *testing.T) {
return newSingleContainerStatus(testContainerID, containerState, annotations), nil
}
err = execute(ctx)
err = execute(context.Background(), ctx)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -127,7 +128,7 @@ func TestExecuteErrors(t *testing.T) {
return newSingleContainerStatus(testContainerID, containerState, annotations), nil
}
err = execute(ctx)
err = execute(context.Background(), ctx)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -295,6 +296,7 @@ func TestExecuteWithFlags(t *testing.T) {
// EnterContainer error
err = fn(ctx)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
testingImpl.EnterContainerFunc = func(sandboxID, containerID string, cmd vc.Cmd) (vc.VCSandbox, vc.VCContainer, *vc.Process, error) {


@ -24,6 +24,7 @@ import (
"syscall"
vc "github.com/kata-containers/runtime/virtcontainers"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -282,6 +283,13 @@ var kataCheckCLICommand = cli.Command{
Name: checkCmd,
Usage: "tests if system can run " + project,
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "kata-check")
defer span.Finish()
setCPUtype()
@ -292,7 +300,7 @@ var kataCheckCLICommand = cli.Command{
requiredKernelModules: archRequiredKernelModules,
}
err := hostIsVMContainerCapable(details)
err = hostIsVMContainerCapable(details)
if err != nil {
return err


@ -17,6 +17,7 @@ import (
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -398,6 +399,14 @@ var kataEnvCLICommand = cli.Command{
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "kata-env")
defer span.Finish()
return handleSettings(defaultOutputFile, context)
},
}


@ -7,12 +7,14 @@
package main
import (
"context"
"fmt"
"strconv"
"syscall"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -37,6 +39,11 @@ EXAMPLE:
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
args := context.Args()
if args.Present() == false {
return fmt.Errorf("Missing container ID")
@ -48,7 +55,7 @@ EXAMPLE:
signal = "SIGTERM"
}
return kill(args.First(), signal, context.Bool("all"))
return kill(ctx, args.First(), signal, context.Bool("all"))
},
}
@ -90,9 +97,13 @@ var signals = map[string]syscall.Signal{
"SIGXFSZ": syscall.SIGXFSZ,
}
func kill(containerID, signal string, all bool) error {
func kill(ctx context.Context, containerID, signal string, all bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "kill")
defer span.Finish()
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
status, sandboxID, err := getExistingContainerInfo(containerID)
@ -108,6 +119,9 @@ func kill(containerID, signal string, all bool) error {
"sandbox": sandboxID,
})
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
setExternalLoggers(kataLog)
signum, err := processSignal(signal)


@ -16,6 +16,7 @@ import (
"text/tabwriter"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
vc "github.com/kata-containers/runtime/virtcontainers"
@ -108,6 +109,14 @@ To list containers created using a non-default value for "--root":
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "list")
defer span.Finish()
s, err := getContainers(context)
if err != nil {
return err


@ -7,6 +7,7 @@
package main
import (
"context"
"errors"
"fmt"
"io"
@ -20,6 +21,7 @@ import (
vf "github.com/kata-containers/runtime/virtcontainers/factory"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -56,6 +58,9 @@ var originalLoggerLevel logrus.Level
var debug = false
// if true, enable opentracing support.
var tracing = false
// if true, coredump when an internal error occurs or a fatal signal is received
var crashOnError = false
@ -131,6 +136,10 @@ var runtimeCommands = []cli.Command{
// parsing occurs.
var runtimeBeforeSubcommands = beforeSubcommands
// runtimeAfterSubcommands is the function to run after the command-line
// has been parsed.
var runtimeAfterSubcommands = afterSubcommands
// runtimeCommandNotFound is the function to handle an invalid sub-command.
var runtimeCommandNotFound = commandNotFound
@ -161,7 +170,13 @@ func init() {
kataLog.Logger.Level = logrus.DebugLevel
}
func setupSignalHandler() {
// setupSignalHandler sets up signal handling, starting a go routine to deal
// with signals as they arrive.
//
// Note that the specified context is NOT used to create a trace span (since the
// first (root) span must be created in beforeSubcommands()): it is simply
// used to pass to the crash handling functions to finalise tracing.
func setupSignalHandler(ctx context.Context) {
sigCh := make(chan os.Signal, 8)
for _, sig := range handledSignals() {
@ -181,7 +196,7 @@ func setupSignalHandler() {
if fatalSignal(nativeSignal) {
kataLog.WithField("signal", sig).Error("received fatal signal")
die()
die(ctx)
} else if debug && nonFatalSignal(nativeSignal) {
kataLog.WithField("signal", sig).Debug("handling signal")
backtrace()
@ -206,15 +221,7 @@ func setExternalLoggers(logger *logrus.Entry) {
// beforeSubcommands is the function to perform preliminary checks
// before command-line parsing occurs.
func beforeSubcommands(context *cli.Context) error {
if context.GlobalBool(showConfigPathsOption) {
files := getDefaultConfigFilePaths()
for _, file := range files {
fmt.Fprintf(defaultOutputFile, "%s\n", file)
}
exit(0)
}
handleShowConfig(context)
if userWantsUsage(context) || (context.NArg() == 1 && (context.Args()[0] == checkCmd)) {
// No setup required if the user just
@ -241,11 +248,17 @@ func beforeSubcommands(context *cli.Context) error {
return fmt.Errorf("unknown log-format %q", context.GlobalString("log-format"))
}
var traceRootSpan string
// Add the name of the sub-command to each log entry for easier
// debugging.
cmdName := context.Args().First()
if context.App.Command(cmdName) != nil {
kataLog = kataLog.WithField("command", cmdName)
// Name for the root span (used for tracing) now the
// sub-command name is known.
traceRootSpan = name + " " + cmdName
}
setExternalLoggers(kataLog)
@ -262,6 +275,19 @@ func beforeSubcommands(context *cli.Context) error {
fatal(err)
}
if traceRootSpan != "" {
// Create the tracer.
//
// Note: no spans are created until the command-line has been parsed.
// This delays collection of trace data slightly but benefits the user by
// ensuring the first span is the name of the sub-command being
// invoked from the command-line.
err = setupTracing(context, traceRootSpan)
if err != nil {
return err
}
}
args := strings.Join(context.Args(), " ")
fields := logrus.Fields{
@ -273,10 +299,63 @@ func beforeSubcommands(context *cli.Context) error {
kataLog.WithFields(fields).Info()
// make the data accessible to the sub-commands.
context.App.Metadata = map[string]interface{}{
"runtimeConfig": runtimeConfig,
"configFile": configFile,
context.App.Metadata["runtimeConfig"] = runtimeConfig
context.App.Metadata["configFile"] = configFile
return nil
}
// handleShowConfig determines if the user wishes to see the configuration
// paths. If so, it will display them and then exit.
func handleShowConfig(context *cli.Context) {
if context.GlobalBool(showConfigPathsOption) {
files := getDefaultConfigFilePaths()
for _, file := range files {
fmt.Fprintf(defaultOutputFile, "%s\n", file)
}
exit(0)
}
}
func setupTracing(context *cli.Context, rootSpanName string) error {
tracer, err := createTracer(name)
if err != nil {
fatal(err)
}
// Create the root span now that the sub-command name is
// known.
//
// Note that this "Before" function is called (and returns)
// before the subcommand handler is called. As such, we cannot
// "Finish()" the span here - that is handled in the .After
// function.
span := tracer.StartSpan(rootSpanName)
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
// Associate the root span with the context
ctx = opentracing.ContextWithSpan(ctx, span)
// Add tracer to metadata and update the context
context.App.Metadata["tracer"] = tracer
context.App.Metadata["context"] = ctx
return nil
}
func afterSubcommands(c *cli.Context) error {
ctx, err := cliContextToContext(c)
if err != nil {
return err
}
stopTracing(ctx)
return nil
}
@ -336,7 +415,7 @@ func setCLIGlobals() {
// createRuntimeApp creates an application to process the command-line
// arguments and invoke the requested runtime command.
func createRuntimeApp(args []string) error {
func createRuntimeApp(ctx context.Context, args []string) error {
app := cli.NewApp()
app.Name = name
@ -347,8 +426,14 @@ func createRuntimeApp(args []string) error {
app.Flags = runtimeFlags
app.Commands = runtimeCommands
app.Before = runtimeBeforeSubcommands
app.After = runtimeAfterSubcommands
app.EnableBashCompletion = true
// allow sub-commands to access context
app.Metadata = map[string]interface{}{
"context": ctx,
}
return app.Run(args)
}
@ -387,18 +472,38 @@ func (f *fatalWriter) Write(p []byte) (n int, err error) {
return f.cliErrWriter.Write(p)
}
func createRuntime() {
setupSignalHandler()
func createRuntime(ctx context.Context) {
setupSignalHandler(ctx)
setCLIGlobals()
err := createRuntimeApp(os.Args)
err := createRuntimeApp(ctx, os.Args)
if err != nil {
fatal(err)
}
}
func main() {
defer handlePanic()
createRuntime()
// cliContextToContext extracts the generic context from the specified
// cli context.
func cliContextToContext(c *cli.Context) (context.Context, error) {
if c == nil {
return nil, errors.New("need cli.Context")
}
// extract the main context
ctx, ok := c.App.Metadata["context"].(context.Context)
if !ok {
return nil, errors.New("invalid or missing context in metadata")
}
return ctx, nil
}
func main() {
// create a new empty context
ctx := context.Background()
defer handlePanic(ctx)
createRuntime(ctx)
}


@ -7,6 +7,7 @@ package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
@ -26,6 +27,7 @@ import (
"github.com/kata-containers/runtime/virtcontainers/pkg/vcmock"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
jaeger "github.com/uber/jaeger-client-go"
"github.com/urfave/cli"
)
@ -474,6 +476,10 @@ func createCLIContextWithApp(flagSet *flag.FlagSet, app *cli.App) *cli.Context {
ctx.App.Metadata = map[string]interface{}{}
}
// add standard entries
ctx.App.Metadata["context"] = context.Background()
ctx.App.Metadata["tracer"] = &jaeger.Tracer{}
return ctx
}
@ -918,7 +924,7 @@ func TestMainCreateRuntimeApp(t *testing.T) {
args := []string{name}
err = createRuntimeApp(args)
err = createRuntimeApp(context.Background(), args)
assert.NoError(err, "%v", args)
}
@ -941,7 +947,7 @@ func TestMainCreateRuntimeAppInvalidSubCommand(t *testing.T) {
}()
// calls fatal() so no return
_ = createRuntimeApp([]string{name, "i-am-an-invalid-sub-command"})
_ = createRuntimeApp(context.Background(), []string{name, "i-am-an-invalid-sub-command"})
assert.NotEqual(exitStatus, 0)
}
@ -985,7 +991,7 @@ func TestMainCreateRuntime(t *testing.T) {
}()
assert.Equal(exitStatus, 0)
createRuntime()
createRuntime(context.Background())
assert.NotEqual(exitStatus, 0)
}
@ -1012,7 +1018,7 @@ func TestMainVersionPrinter(t *testing.T) {
setCLIGlobals()
err = createRuntimeApp([]string{name, "--version"})
err = createRuntimeApp(context.Background(), []string{name, "--version"})
assert.NoError(err)
err = grep(fmt.Sprintf(`%s\s*:\s*%s`, name, version), output)
@ -1060,7 +1066,7 @@ func TestMainFatalWriter(t *testing.T) {
setCLIGlobals()
err := createRuntimeApp([]string{name, cmd})
err := createRuntimeApp(context.Background(), []string{name, cmd})
assert.Error(err)
re := regexp.MustCompile(


@ -7,6 +7,7 @@ package main
import (
"bufio"
"context"
"errors"
"fmt"
"io/ioutil"
@ -20,6 +21,7 @@ import (
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
"github.com/opencontainers/runc/libcontainer/utils"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
)
@ -125,7 +127,10 @@ func validCreateParams(containerID, bundlePath string) (string, error) {
// processCgroupsPath process the cgroups path as expected from the
// OCI runtime specification. It returns a list of complete paths
// that should be created and used for every specified resource.
func processCgroupsPath(ociSpec oci.CompatOCISpec, isSandbox bool) ([]string, error) {
func processCgroupsPath(ctx context.Context, ociSpec oci.CompatOCISpec, isSandbox bool) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "processCgroupsPath")
defer span.Finish()
var cgroupsPathList []string
if ociSpec.Linux.CgroupsPath == "" {
@ -371,7 +376,10 @@ func fetchContainerIDMapping(containerID string) (string, error) {
return files[0].Name(), nil
}
func addContainerIDMapping(containerID, sandboxID string) error {
func addContainerIDMapping(ctx context.Context, containerID, sandboxID string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "addContainerIDMapping")
defer span.Finish()
if containerID == "" {
return fmt.Errorf("Missing container ID")
}
@ -395,7 +403,10 @@ func addContainerIDMapping(containerID, sandboxID string) error {
return nil
}
func delContainerIDMapping(containerID string) error {
func delContainerIDMapping(ctx context.Context, containerID string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "delContainerIDMapping")
defer span.Finish()
if containerID == "" {
return fmt.Errorf("Missing container ID")
}


@ -6,6 +6,7 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
@ -182,7 +183,7 @@ func TestValidCreateParamsBundleIsAFile(t *testing.T) {
func testProcessCgroupsPath(t *testing.T, ociSpec oci.CompatOCISpec, expected []string) {
assert := assert.New(t)
result, err := processCgroupsPath(ociSpec, true)
result, err := processCgroupsPath(context.Background(), ociSpec, true)
assert.NoError(err)
@ -268,7 +269,7 @@ func TestProcessCgroupsPathAbsoluteNoCgroupMountDestinationFailure(t *testing.T)
for _, d := range cgroupTestData {
ociSpec.Linux.Resources = d.linuxSpec
for _, isSandbox := range []bool{true, false} {
_, err := processCgroupsPath(ociSpec, isSandbox)
_, err := processCgroupsPath(context.Background(), ociSpec, isSandbox)
assert.Error(err, "This test should fail because no cgroup mount destination provided")
}
}
@ -563,14 +564,14 @@ func TestFetchContainerIDMappingSuccess(t *testing.T) {
func TestAddContainerIDMappingContainerIDEmptyFailure(t *testing.T) {
assert := assert.New(t)
err := addContainerIDMapping("", testSandboxID)
err := addContainerIDMapping(context.Background(), "", testSandboxID)
assert.Error(err)
}
func TestAddContainerIDMappingSandboxIDEmptyFailure(t *testing.T) {
assert := assert.New(t)
err := addContainerIDMapping(testContainerID, "")
err := addContainerIDMapping(context.Background(), testContainerID, "")
assert.Error(err)
}
@ -585,7 +586,7 @@ func TestAddContainerIDMappingSuccess(t *testing.T) {
_, err = os.Stat(filepath.Join(ctrsMapTreePath, testContainerID, testSandboxID))
assert.True(os.IsNotExist(err))
err = addContainerIDMapping(testContainerID, testSandboxID)
err = addContainerIDMapping(context.Background(), testContainerID, testSandboxID)
assert.NoError(err)
_, err = os.Stat(filepath.Join(ctrsMapTreePath, testContainerID, testSandboxID))
@ -595,7 +596,7 @@ func TestAddContainerIDMappingSuccess(t *testing.T) {
func TestDelContainerIDMappingContainerIDEmptyFailure(t *testing.T) {
assert := assert.New(t)
err := delContainerIDMapping("")
err := delContainerIDMapping(context.Background(), "")
assert.Error(err)
}
@ -609,7 +610,7 @@ func TestDelContainerIDMappingSuccess(t *testing.T) {
_, err = os.Stat(filepath.Join(ctrsMapTreePath, testContainerID, testSandboxID))
assert.NoError(err)
err = delContainerIDMapping(testContainerID)
err = delContainerIDMapping(context.Background(), testContainerID)
assert.NoError(err)
_, err = os.Stat(filepath.Join(ctrsMapTreePath, testContainerID, testSandboxID))


@ -7,6 +7,9 @@
package main
import (
"context"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -22,9 +25,7 @@ Where "<container-id>" is the container name to be paused.`,
Description: `The pause command suspends all processes in a container.
` + noteText,
Action: func(context *cli.Context) error {
return toggleContainerPause(context.Args().First(), true)
},
Action: pause,
}
var resumeCLICommand = cli.Command{
@ -36,14 +37,34 @@ Where "<container-id>" is the container name to be resumed.`,
Description: `The resume command unpauses all processes in a container.
` + noteText,
Action: func(context *cli.Context) error {
return toggleContainerPause(context.Args().First(), false)
},
Action: resume,
}
func toggleContainerPause(containerID string, pause bool) (err error) {
func pause(c *cli.Context) error {
return toggle(c, true)
}
func resume(c *cli.Context) error {
return toggle(c, false)
}
func toggle(c *cli.Context, pause bool) error {
ctx, err := cliContextToContext(c)
if err != nil {
return err
}
return toggleContainerPause(ctx, c.Args().First(), pause)
}
func toggleContainerPause(ctx context.Context, containerID string, pause bool) (err error) {
span, _ := opentracing.StartSpanFromContext(ctx, "pause")
defer span.Finish()
span.SetTag("pause", pause)
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
status, sandboxID, err := getExistingContainerInfo(containerID)
@ -59,6 +80,8 @@ func toggleContainerPause(containerID string, pause bool) (err error) {
})
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
if pause {
err = vci.PauseContainer(sandboxID, containerID)


@ -7,9 +7,11 @@
package main
import (
"context"
"fmt"
vc "github.com/kata-containers/runtime/virtcontainers"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -26,6 +28,11 @@ var psCLICommand = cli.Command{
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
if context.Args().Present() == false {
return fmt.Errorf("Missing container ID, should at least provide one")
}
@ -38,18 +45,22 @@ var psCLICommand = cli.Command{
args = context.Args()[1:]
}
return ps(context.Args().First(), context.String("format"), args)
return ps(ctx, context.Args().First(), context.String("format"), args)
},
SkipArgReorder: true,
}
func ps(containerID, format string, args []string) error {
func ps(ctx context.Context, containerID, format string, args []string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "ps")
defer span.Finish()
if containerID == "" {
return fmt.Errorf("Missing container ID")
}
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
status, sandboxID, err := getExistingContainerInfo(containerID)
@ -65,6 +76,8 @@ func ps(containerID, format string, args []string) error {
})
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
// container MUST be running
if status.State.State != vc.StateRunning {


@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"os"
"testing"
@ -66,11 +67,11 @@ func TestPSFailure(t *testing.T) {
}()
// inexistent container
err = ps("xyz123abc", "json", []string{"-ef"})
err = ps(context.Background(), "xyz123abc", "json", []string{"-ef"})
assert.Error(err)
// container is not running
err = ps(sandbox.ID(), "json", []string{"-ef"})
err = ps(context.Background(), sandbox.ID(), "json", []string{"-ef"})
assert.Error(err)
}
@ -113,6 +114,6 @@ func TestPSSuccessful(t *testing.T) {
testingImpl.ProcessListContainerFunc = nil
}()
err = ps(sandbox.ID(), "json", []string{})
err = ps(context.Background(), sandbox.ID(), "json", []string{})
assert.NoError(err)
}


@ -7,12 +7,14 @@
package main
import (
"context"
"errors"
"fmt"
"os"
"syscall"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -58,12 +60,17 @@ var runCLICommand = cli.Command{
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
if !ok {
return errors.New("invalid runtime config")
}
return run(context.Args().First(),
return run(ctx, context.Args().First(),
context.String("bundle"),
context.String("console"),
context.String("console-socket"),
@ -73,19 +80,21 @@ var runCLICommand = cli.Command{
},
}
func run(containerID, bundle, console, consoleSocket, pidFile string, detach bool,
func run(ctx context.Context, containerID, bundle, console, consoleSocket, pidFile string, detach bool,
runtimeConfig oci.RuntimeConfig) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "run")
defer span.Finish()
consolePath, err := setupConsole(console, consoleSocket)
if err != nil {
return err
}
if err := create(containerID, bundle, consolePath, pidFile, detach, runtimeConfig); err != nil {
if err := create(ctx, containerID, bundle, consolePath, pidFile, detach, runtimeConfig); err != nil {
return err
}
sandbox, err := start(containerID)
sandbox, err := start(ctx, containerID)
if err != nil {
return err
}
@ -110,7 +119,7 @@ func run(containerID, bundle, console, consoleSocket, pidFile string, detach boo
}
// delete container's resources
if err := delete(sandbox.ID(), true); err != nil {
if err := delete(ctx, sandbox.ID(), true); err != nil {
return err
}


@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"fmt"
"io/ioutil"
@ -142,7 +143,7 @@ func TestRunInvalidArgs(t *testing.T) {
}
for i, a := range args {
err := run(a.containerID, a.bundle, a.console, a.consoleSocket, a.pidFile, a.detach, a.runtimeConfig)
err := run(context.Background(), a.containerID, a.bundle, a.console, a.consoleSocket, a.pidFile, a.detach, a.runtimeConfig)
assert.Errorf(err, "test %d (%+v)", i, a)
}
}
@ -284,7 +285,7 @@ func TestRunContainerSuccessful(t *testing.T) {
testingImpl.DeleteContainerFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
// should return ExitError with the message and exit code
e, ok := err.(*cli.ExitError)
@ -358,7 +359,7 @@ func TestRunContainerDetachSuccessful(t *testing.T) {
testingImpl.DeleteContainerFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, true, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, true, d.runtimeConfig)
// should not return ExitError
assert.NoError(err)
@ -431,7 +432,7 @@ func TestRunContainerDeleteFail(t *testing.T) {
testingImpl.DeleteContainerFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
// should not return ExitError
err, ok := err.(*cli.ExitError)
@ -508,7 +509,7 @@ func TestRunContainerWaitFail(t *testing.T) {
testingImpl.DeleteContainerFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
// should not return ExitError
err, ok := err.(*cli.ExitError)
@ -566,7 +567,7 @@ func TestRunContainerStartFail(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
// should not return ExitError
err, ok := err.(*cli.ExitError)
@ -621,7 +622,7 @@ func TestRunContainerStartFailExistingContainer(t *testing.T) {
testingImpl.StartSandboxFunc = nil
}()
err = run(d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
err = run(context.Background(), d.sandbox.ID(), d.bundlePath, d.consolePath, "", d.pidFilePath, false, d.runtimeConfig)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}

View File

@ -7,6 +7,7 @@ package main
import (
"bytes"
"context"
"fmt"
"os/signal"
"runtime/pprof"
@ -29,14 +30,14 @@ var handledSignalsMap = map[syscall.Signal]bool{
syscall.SIGUSR1: false,
}
func handlePanic() {
func handlePanic(ctx context.Context) {
r := recover()
if r != nil {
msg := fmt.Sprintf("%s", r)
kataLog.WithField("panic", msg).Error("fatal error")
die()
die(ctx)
}
}
@ -84,7 +85,9 @@ func handledSignals() []syscall.Signal {
return signals
}
func die() {
func die(ctx context.Context) {
stopTracing(ctx)
backtrace()
if crashOnError {
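
`handlePanic()` only has an effect when it is deferred, because `recover()` returns nil outside a deferred function. The call site is not part of this hunk, so the wrapper below is purely an illustrative sketch of the expected usage: defer it once a tracing context exists so that a panic still flushes the collected spans via `die()` and `stopTracing()`.

```go
package main

import "context"

// Sketch only: runWithPanicHandler is a hypothetical wrapper, not part of the
// commit. It shows the intended pattern of deferring handlePanic(ctx) so that
// a panic is logged and trace spans are flushed before the process dies.
func runWithPanicHandler(ctx context.Context, work func()) {
	defer handlePanic(ctx) // recover(), log, then die(ctx) -> stopTracing(ctx)
	work()
}
```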

View File

@ -13,6 +13,7 @@ import (
"os"
"github.com/opencontainers/runc/libcontainer/specconv"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -72,6 +73,14 @@ generate a proper rootless spec file.`,
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "spec")
defer span.Finish()
spec := specconv.Example()
checkNoFile := func(name string) error {

View File

@ -7,10 +7,12 @@
package main
import (
"context"
"fmt"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -25,13 +27,18 @@ var startCLICommand = cli.Command{
unique on your host.`,
Description: `The start command executes the user-defined process in a created container.`,
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
args := context.Args()
if args.Present() == false {
return fmt.Errorf("Missing container ID, should at least provide one")
}
for _, cID := range []string(args) {
if _, err := start(cID); err != nil {
if _, err := start(ctx, cID); err != nil {
return err
}
}
@ -40,9 +47,13 @@ var startCLICommand = cli.Command{
},
}
func start(containerID string) (vc.VCSandbox, error) {
func start(ctx context.Context, containerID string) (vc.VCSandbox, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "start")
defer span.Finish()
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
// Checks the MUST and MUST NOT from OCI runtime specification
status, sandboxID, err := getExistingContainerInfo(containerID)
@ -58,6 +69,8 @@ func start(containerID string) (vc.VCSandbox, error) {
})
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
containerType, err := oci.GetContainerType(status.Annotations)
if err != nil {

View File

@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"io/ioutil"
"os"
@ -22,7 +23,7 @@ func TestStartInvalidArgs(t *testing.T) {
assert := assert.New(t)
// Missing container id
_, err := start("")
_, err := start(context.Background(), "")
assert.Error(err)
assert.False(vcmock.IsMockError(err))
@ -31,7 +32,7 @@ func TestStartInvalidArgs(t *testing.T) {
defer os.RemoveAll(path)
// Mock StatusContainer error
_, err = start(testContainerID)
_, err = start(context.Background(), testContainerID)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -41,7 +42,7 @@ func TestStartInvalidArgs(t *testing.T) {
ctrsMapTreePath = path
// Container missing in container mapping
_, err = start(testContainerID)
_, err = start(context.Background(), testContainerID)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -70,7 +71,7 @@ func TestStartSandbox(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
_, err = start(sandbox.ID())
_, err = start(context.Background(), sandbox.ID())
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -82,7 +83,7 @@ func TestStartSandbox(t *testing.T) {
testingImpl.StartSandboxFunc = nil
}()
_, err = start(sandbox.ID())
_, err = start(context.Background(), sandbox.ID())
assert.Nil(err)
}
@ -108,7 +109,7 @@ func TestStartMissingAnnotation(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
_, err = start(sandbox.ID())
_, err = start(context.Background(), sandbox.ID())
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
@ -144,7 +145,7 @@ func TestStartContainerSucessFailure(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
_, err = start(testContainerID)
_, err = start(context.Background(), testContainerID)
assert.Error(err)
assert.True(vcmock.IsMockError(err))
@ -156,7 +157,7 @@ func TestStartContainerSucessFailure(t *testing.T) {
testingImpl.StartContainerFunc = nil
}()
_, err = start(testContainerID)
_, err = start(context.Background(), testContainerID)
assert.Nil(err)
}

View File

@ -7,11 +7,13 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -24,17 +26,26 @@ var stateCLICommand = cli.Command{
Description: `The state command outputs current state information for the
instance of a container.`,
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
args := context.Args()
if len(args) != 1 {
return fmt.Errorf("Expecting only one container ID, got %d: %v", len(args), []string(args))
}
return state(args.First())
return state(ctx, args.First())
},
}
func state(containerID string) error {
func state(ctx context.Context, containerID string) error {
span, _ := opentracing.StartSpanFromContext(ctx, "state")
defer span.Finish()
kataLog = kataLog.WithField("container", containerID)
span.SetTag("container", containerID)
setExternalLoggers(kataLog)

View File

@ -6,6 +6,7 @@
package main
import (
"context"
"flag"
"io/ioutil"
"os"
@ -59,7 +60,7 @@ func TestStateSuccessful(t *testing.T) {
ctrsMapTreePath = path
// trying with an inexistent id
err = state("123456789")
err = state(context.Background(), "123456789")
assert.Error(err)
path, err = createTempContainerIDMapping(testContainerID, sandbox.ID())
@ -79,6 +80,6 @@ func TestStateSuccessful(t *testing.T) {
testingImpl.StatusContainerFunc = nil
}()
err = state(testContainerID)
err = state(context.Background(), testContainerID)
assert.NoError(err)
}

78
cli/tracing.go Normal file
View File

@ -0,0 +1,78 @@
// Copyright (c) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"context"
"io"
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/config"
)
// traceLogger implements the jaeger-client-go Logger interface.
type traceLogger struct {
}
// tracerCloser holds the io.Closer returned by createTracer(); stopTracing()
// uses it to flush and close the tracer.
var tracerCloser io.Closer
func (t traceLogger) Error(msg string) {
kataLog.Error(msg)
}
func (t traceLogger) Infof(msg string, args ...interface{}) {
kataLog.Infof(msg, args...)
}
func createTracer(name string) (opentracing.Tracer, error) {
cfg := &config.Configuration{
ServiceName: name,
// If tracing is disabled, use a NOP trace implementation
Disabled: !tracing,
// Note that the span logging reporter option cannot be enabled as
// it pollutes the output stream, which causes (at least) the
// "state" command to fail under Docker.
Sampler: &config.SamplerConfig{
Type: "const",
Param: 1,
},
}
logger := traceLogger{}
tracer, closer, err := cfg.NewTracer(config.Logger(logger))
if err != nil {
return nil, err
}
// save for stopTracing()'s exclusive use
tracerCloser = closer
// Setting the global tracer is essential: StartSpanFromContext() uses it,
// so without this call non-root spans would go to the no-op tracer and
// never be reported.
opentracing.SetGlobalTracer(tracer)
return tracer, nil
}
// stopTracing() ends all tracing, reporting the spans to the collector.
func stopTracing(ctx context.Context) {
if !tracing {
return
}
span := opentracing.SpanFromContext(ctx)
if span != nil {
span.Finish()
}
// report all possible spans to the collector
tracerCloser.Close()
}
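
Taken together, the helpers in `cli/tracing.go` give a simple lifecycle: `createTracer()` installs a (possibly no-op) global tracer, spans are then started from contexts, and `stopTracing()` finishes the context's span and closes the tracer so everything is reported. The sketch below wires these steps end to end; the span name, tag, and surrounding function are illustrative only.

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// tracedRunSketch is illustrative only; it is not part of the commit.
func tracedRunSketch() error {
	// createTracer installs the tracer as the global one, so
	// StartSpanFromContext below will use it.
	if _, err := createTracer("kata-runtime"); err != nil {
		return err
	}

	// Root span: every later StartSpanFromContext call that receives a
	// context derived from ctx becomes a child of this span.
	span, ctx := opentracing.StartSpanFromContext(context.Background(), "root")
	span.SetTag("source", "sketch") // illustrative tag

	// ... per-subcommand spans (create, start, run, ...) hang off ctx here ...

	// Finishes the span stored in ctx and closes the tracer so all spans
	// are reported (a no-op when tracing is disabled).
	stopTracing(ctx)
	return nil
}
```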

View File

@ -15,6 +15,7 @@ import (
"github.com/docker/go-units"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -127,6 +128,14 @@ other options are ignored.
},
},
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "update")
defer span.Finish()
if context.Args().Present() == false {
return fmt.Errorf("Missing container ID, should at least provide one")
}
@ -135,6 +144,7 @@ other options are ignored.
kataLog = kataLog.WithField("container", containerID)
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
status, sandboxID, err := getExistingContainerInfo(containerID)
if err != nil {
@ -150,6 +160,9 @@ other options are ignored.
setExternalLoggers(kataLog)
span.SetTag("container", containerID)
span.SetTag("sandbox", sandboxID)
// container MUST be running
if status.State.State != vc.StateRunning {
return fmt.Errorf("Container %s is not running", containerID)

View File

@ -6,6 +6,7 @@
package main
import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/urfave/cli"
)
@ -13,6 +14,14 @@ var versionCLICommand = cli.Command{
Name: "version",
Usage: "display version details",
Action: func(context *cli.Context) error {
ctx, err := cliContextToContext(context)
if err != nil {
return err
}
span, _ := opentracing.StartSpanFromContext(ctx, "version")
defer span.Finish()
cli.VersionPrinter(context)
return nil
},

21
vendor/github.com/codahale/hdrhistogram/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Coda Hale
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

564
vendor/github.com/codahale/hdrhistogram/hdr.go generated vendored Normal file
View File

@ -0,0 +1,564 @@
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
// data structure. The HDR Histogram allows for fast and accurate analysis of
// the extreme ranges of data with non-normal distributions, like latency.
package hdrhistogram
import (
"fmt"
"math"
)
// A Bracket is a part of a cumulative distribution.
type Bracket struct {
Quantile float64
Count, ValueAt int64
}
// A Snapshot is an exported view of a Histogram, useful for serializing them.
// A Histogram can be constructed from it by passing it to Import.
type Snapshot struct {
LowestTrackableValue int64
HighestTrackableValue int64
SignificantFigures int64
Counts []int64
}
// A Histogram is a lossy data structure used to record the distribution of
// non-normally distributed data (like latency) with a high degree of accuracy
// and a bounded degree of precision.
type Histogram struct {
lowestTrackableValue int64
highestTrackableValue int64
unitMagnitude int64
significantFigures int64
subBucketHalfCountMagnitude int32
subBucketHalfCount int32
subBucketMask int64
subBucketCount int32
bucketCount int32
countsLen int32
totalCount int64
counts []int64
}
// New returns a new Histogram instance capable of tracking values in the given
// range and with the given amount of precision.
func New(minValue, maxValue int64, sigfigs int) *Histogram {
if sigfigs < 1 || 5 < sigfigs {
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
}
largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs)
subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution))))
subBucketHalfCountMagnitude := subBucketCountMagnitude
if subBucketHalfCountMagnitude < 1 {
subBucketHalfCountMagnitude = 1
}
subBucketHalfCountMagnitude--
unitMagnitude := int32(math.Floor(math.Log2(float64(minValue))))
if unitMagnitude < 0 {
unitMagnitude = 0
}
subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))
subBucketHalfCount := subBucketCount / 2
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)
// determine exponent range needed to support the trackable value with no
// overflow:
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
bucketsNeeded := int32(1)
for smallestUntrackableValue < maxValue {
smallestUntrackableValue <<= 1
bucketsNeeded++
}
bucketCount := bucketsNeeded
countsLen := (bucketCount + 1) * (subBucketCount / 2)
return &Histogram{
lowestTrackableValue: minValue,
highestTrackableValue: maxValue,
unitMagnitude: int64(unitMagnitude),
significantFigures: int64(sigfigs),
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
subBucketHalfCount: subBucketHalfCount,
subBucketMask: subBucketMask,
subBucketCount: subBucketCount,
bucketCount: bucketCount,
countsLen: countsLen,
totalCount: 0,
counts: make([]int64, countsLen),
}
}
// ByteSize returns an estimate of the amount of memory allocated to the
// histogram in bytes.
//
// N.B.: This does not take into account the overhead for slices, which are
// small, constant, and specific to the compiler version.
func (h *Histogram) ByteSize() int {
return 6*8 + 5*4 + len(h.counts)*8
}
// Merge merges the data stored in the given histogram with the receiver,
// returning the number of recorded values which had to be dropped.
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
i := from.rIterator()
for i.next() {
v := i.valueFromIdx
c := i.countAtIdx
if h.RecordValues(v, c) != nil {
dropped += c
}
}
return
}
// TotalCount returns total number of values recorded.
func (h *Histogram) TotalCount() int64 {
return h.totalCount
}
// Max returns the approximate maximum recorded value.
func (h *Histogram) Max() int64 {
var max int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
max = i.highestEquivalentValue
}
}
return h.highestEquivalentValue(max)
}
// Min returns the approximate minimum recorded value.
func (h *Histogram) Min() int64 {
var min int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 && min == 0 {
min = i.highestEquivalentValue
break
}
}
return h.lowestEquivalentValue(min)
}
// Mean returns the approximate arithmetic mean of the recorded values.
func (h *Histogram) Mean() float64 {
if h.totalCount == 0 {
return 0
}
var total int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
}
}
return float64(total) / float64(h.totalCount)
}
// StdDev returns the approximate standard deviation of the recorded values.
func (h *Histogram) StdDev() float64 {
if h.totalCount == 0 {
return 0
}
mean := h.Mean()
geometricDevTotal := 0.0
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
}
}
return math.Sqrt(geometricDevTotal / float64(h.totalCount))
}
// Reset deletes all recorded values and restores the histogram to its original
// state.
func (h *Histogram) Reset() {
h.totalCount = 0
for i := range h.counts {
h.counts[i] = 0
}
}
// RecordValue records the given value, returning an error if the value is out
// of range.
func (h *Histogram) RecordValue(v int64) error {
return h.RecordValues(v, 1)
}
// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
// recording ad-hoc values (e.g., latency for incoming requests) can't take
// advantage of this.
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
if err := h.RecordValue(v); err != nil {
return err
}
if expectedInterval <= 0 || v <= expectedInterval {
return nil
}
missingValue := v - expectedInterval
for missingValue >= expectedInterval {
if err := h.RecordValue(missingValue); err != nil {
return err
}
missingValue -= expectedInterval
}
return nil
}
// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordValues(v, n int64) error {
idx := h.countsIndexFor(v)
if idx < 0 || int(h.countsLen) <= idx {
return fmt.Errorf("value %d is too large to be recorded", v)
}
h.counts[idx] += n
h.totalCount += n
return nil
}
// ValueAtQuantile returns the recorded value at the given quantile (0..100).
func (h *Histogram) ValueAtQuantile(q float64) int64 {
if q > 100 {
q = 100
}
total := int64(0)
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)
i := h.iterator()
for i.next() {
total += i.countAtIdx
if total >= countAtPercentile {
return h.highestEquivalentValue(i.valueFromIdx)
}
}
return 0
}
// CumulativeDistribution returns an ordered list of brackets of the
// distribution of recorded values.
func (h *Histogram) CumulativeDistribution() []Bracket {
var result []Bracket
i := h.pIterator(1)
for i.next() {
result = append(result, Bracket{
Quantile: i.percentile,
Count: i.countToIdx,
ValueAt: i.highestEquivalentValue,
})
}
return result
}
// SignificantFigures returns the significant figures used to create the
// histogram
func (h *Histogram) SignificantFigures() int64 {
return h.significantFigures
}
// LowestTrackableValue returns the lower bound on values that will be added
// to the histogram
func (h *Histogram) LowestTrackableValue() int64 {
return h.lowestTrackableValue
}
// HighestTrackableValue returns the upper bound on values that will be added
// to the histogram
func (h *Histogram) HighestTrackableValue() int64 {
return h.highestTrackableValue
}
// Histogram bar for plotting
type Bar struct {
From, To, Count int64
}
// Pretty print as csv for easy plotting
func (b Bar) String() string {
return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count)
}
// Distribution returns an ordered list of bars of the
// distribution of recorded values, counts can be normalized to a probability
func (h *Histogram) Distribution() (result []Bar) {
i := h.iterator()
for i.next() {
result = append(result, Bar{
Count: i.countAtIdx,
From: h.lowestEquivalentValue(i.valueFromIdx),
To: i.highestEquivalentValue,
})
}
return result
}
// Equals returns true if the two Histograms are equivalent, false if not.
func (h *Histogram) Equals(other *Histogram) bool {
switch {
case
h.lowestTrackableValue != other.lowestTrackableValue,
h.highestTrackableValue != other.highestTrackableValue,
h.unitMagnitude != other.unitMagnitude,
h.significantFigures != other.significantFigures,
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
h.subBucketHalfCount != other.subBucketHalfCount,
h.subBucketMask != other.subBucketMask,
h.subBucketCount != other.subBucketCount,
h.bucketCount != other.bucketCount,
h.countsLen != other.countsLen,
h.totalCount != other.totalCount:
return false
default:
for i, c := range h.counts {
if c != other.counts[i] {
return false
}
}
}
return true
}
// Export returns a snapshot view of the Histogram. This can be later passed to
// Import to construct a new Histogram with the same state.
func (h *Histogram) Export() *Snapshot {
return &Snapshot{
LowestTrackableValue: h.lowestTrackableValue,
HighestTrackableValue: h.highestTrackableValue,
SignificantFigures: h.significantFigures,
Counts: append([]int64(nil), h.counts...), // copy
}
}
// Import returns a new Histogram populated from the Snapshot data (which the
// caller must stop accessing).
func Import(s *Snapshot) *Histogram {
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
h.counts = s.Counts
totalCount := int64(0)
for i := int32(0); i < h.countsLen; i++ {
countAtIndex := h.counts[i]
if countAtIndex > 0 {
totalCount += countAtIndex
}
}
h.totalCount = totalCount
return h
}
func (h *Histogram) iterator() *iterator {
return &iterator{
h: h,
subBucketIdx: -1,
}
}
func (h *Histogram) rIterator() *rIterator {
return &rIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
}
}
func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
return &pIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
ticksPerHalfDistance: ticksPerHalfDistance,
}
}
func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
adjustedBucket := bucketIdx
if subBucketIdx >= h.subBucketCount {
adjustedBucket++
}
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
}
func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
}
func (h *Histogram) lowestEquivalentValue(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return h.valueFromIndex(bucketIdx, subBucketIdx)
}
func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
}
func (h *Histogram) highestEquivalentValue(v int64) int64 {
return h.nextNonEquivalentValue(v) - 1
}
func (h *Histogram) medianEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
}
func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
}
func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
offsetInBucket := subBucketIdx - h.subBucketHalfCount
return bucketBaseIdx + offsetInBucket
}
func (h *Histogram) getBucketIndex(v int64) int32 {
pow2Ceiling := bitLen(v | h.subBucketMask)
return int32(pow2Ceiling - int64(h.unitMagnitude) -
int64(h.subBucketHalfCountMagnitude+1))
}
func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
}
func (h *Histogram) countsIndexFor(v int64) int {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return int(h.countsIndex(bucketIdx, subBucketIdx))
}
type iterator struct {
h *Histogram
bucketIdx, subBucketIdx int32
countAtIdx, countToIdx, valueFromIdx int64
highestEquivalentValue int64
}
func (i *iterator) next() bool {
if i.countToIdx >= i.h.totalCount {
return false
}
// increment bucket
i.subBucketIdx++
if i.subBucketIdx >= i.h.subBucketCount {
i.subBucketIdx = i.h.subBucketHalfCount
i.bucketIdx++
}
if i.bucketIdx >= i.h.bucketCount {
return false
}
i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
i.countToIdx += i.countAtIdx
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)
return true
}
type rIterator struct {
iterator
countAddedThisStep int64
}
func (r *rIterator) next() bool {
for r.iterator.next() {
if r.countAtIdx != 0 {
r.countAddedThisStep = r.countAtIdx
return true
}
}
return false
}
type pIterator struct {
iterator
seenLastValue bool
ticksPerHalfDistance int32
percentileToIteratorTo float64
percentile float64
}
func (p *pIterator) next() bool {
if !(p.countToIdx < p.h.totalCount) {
if p.seenLastValue {
return false
}
p.seenLastValue = true
p.percentile = 100
return true
}
if p.subBucketIdx == -1 && !p.iterator.next() {
return false
}
var done = false
for !done {
currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
p.percentile = p.percentileToIteratorTo
halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1))
percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
p.percentileToIteratorTo += 100.0 / percentileReportingTicks
return true
}
done = !p.iterator.next()
}
return true
}
func bitLen(x int64) (n int64) {
for ; x >= 0x8000; x >>= 16 {
n += 16
}
if x >= 0x80 {
x >>= 8
n += 8
}
if x >= 0x8 {
x >>= 4
n += 4
}
if x >= 0x2 {
x >>= 2
n += 2
}
if x >= 0x1 {
n++
}
return
}
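
The vendored hdrhistogram package above is pulled in as part of the new tracing dependencies. The sketch below is a small, self-contained usage example (bounds and values are illustrative) showing the typical record-then-query flow.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track latencies between 1µs and 1s (in microseconds) with 3
	// significant figures.
	h := hdrhistogram.New(1, 1000000, 3)

	for _, v := range []int64{120, 340, 560, 780, 2500} {
		if err := h.RecordValue(v); err != nil {
			fmt.Println("value out of range:", err)
		}
	}

	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("mean:", h.Mean())
}
```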

45
vendor/github.com/codahale/hdrhistogram/window.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package hdrhistogram
// A WindowedHistogram combines histograms to provide windowed statistics.
type WindowedHistogram struct {
idx int
h []Histogram
m *Histogram
Current *Histogram
}
// NewWindowed creates a new WindowedHistogram with N underlying histograms with
// the given parameters.
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
w := WindowedHistogram{
idx: -1,
h: make([]Histogram, n),
m: New(minValue, maxValue, sigfigs),
}
for i := range w.h {
w.h[i] = *New(minValue, maxValue, sigfigs)
}
w.Rotate()
return &w
}
// Merge returns a histogram which includes the recorded values from all the
// sections of the window.
func (w *WindowedHistogram) Merge() *Histogram {
w.m.Reset()
for _, h := range w.h {
w.m.Merge(&h)
}
return w.m
}
// Rotate resets the oldest histogram and rotates it to be used as the current
// histogram.
func (w *WindowedHistogram) Rotate() {
w.idx++
w.Current = &w.h[w.idx%len(w.h)]
w.Current.Reset()
}
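
A companion sketch for the windowed variant above; the window count and bounds are illustrative.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func windowedSketch() {
	// Five rotating windows, each tracking values from 1 to 1000000 with
	// 3 significant figures.
	w := hdrhistogram.NewWindowed(5, 1, 1000000, 3)

	_ = w.Current.RecordValue(42) // record into the current window
	w.Rotate()                    // oldest window reset, becomes Current

	merged := w.Merge() // statistics across all windows
	fmt.Println("p99 across windows:", merged.ValueAtQuantile(99))
}
```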

21
vendor/github.com/opentracing/opentracing-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 The OpenTracing Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,198 @@
package ext
import opentracing "github.com/opentracing/opentracing-go"
// These constants define common tag names recommended for better portability across
// tracing systems and languages/platforms.
//
// The tag names are defined as typed strings, so that in addition to the usual use
//
// span.setTag(TagName, value)
//
// they also support value type validation via this additional syntax:
//
// TagName.Set(span, value)
//
var (
//////////////////////////////////////////////////////////////////////
// SpanKind (client/server or producer/consumer)
//////////////////////////////////////////////////////////////////////
// SpanKind hints at relationship between spans, e.g. client/server
SpanKind = spanKindTagName("span.kind")
// SpanKindRPCClient marks a span representing the client-side of an RPC
// or other remote call
SpanKindRPCClientEnum = SpanKindEnum("client")
SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
// SpanKindRPCServer marks a span representing the server-side of an RPC
// or other remote call
SpanKindRPCServerEnum = SpanKindEnum("server")
SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
// SpanKindProducer marks a span representing the producer-side of a
// message bus
SpanKindProducerEnum = SpanKindEnum("producer")
SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
// SpanKindConsumer marks a span representing the consumer-side of a
// message bus
SpanKindConsumerEnum = SpanKindEnum("consumer")
SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
//////////////////////////////////////////////////////////////////////
// Component name
//////////////////////////////////////////////////////////////////////
// Component is a low-cardinality identifier of the module, library,
// or package that is generating a span.
Component = stringTagName("component")
//////////////////////////////////////////////////////////////////////
// Sampling hint
//////////////////////////////////////////////////////////////////////
// SamplingPriority determines the priority of sampling this Span.
SamplingPriority = uint16TagName("sampling.priority")
//////////////////////////////////////////////////////////////////////
// Peer tags. These tags can be emitted by either client-side of
// server-side to describe the other side/service in a peer-to-peer
// communications, like an RPC call.
//////////////////////////////////////////////////////////////////////
// PeerService records the service name of the peer.
PeerService = stringTagName("peer.service")
// PeerAddress records the address name of the peer. This may be a "ip:port",
// a bare "hostname", a FQDN or even a database DSN substring
// like "mysql://username@127.0.0.1:3306/dbname"
PeerAddress = stringTagName("peer.address")
// PeerHostname records the host name of the peer
PeerHostname = stringTagName("peer.hostname")
// PeerHostIPv4 records IP v4 host address of the peer
PeerHostIPv4 = uint32TagName("peer.ipv4")
// PeerHostIPv6 records IP v6 host address of the peer
PeerHostIPv6 = stringTagName("peer.ipv6")
// PeerPort records port number of the peer
PeerPort = uint16TagName("peer.port")
//////////////////////////////////////////////////////////////////////
// HTTP Tags
//////////////////////////////////////////////////////////////////////
// HTTPUrl should be the URL of the request being handled in this segment
// of the trace, in standard URI format. The protocol is optional.
HTTPUrl = stringTagName("http.url")
// HTTPMethod is the HTTP method of the request, and is case-insensitive.
HTTPMethod = stringTagName("http.method")
// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
// HTTP response.
HTTPStatusCode = uint16TagName("http.status_code")
//////////////////////////////////////////////////////////////////////
// DB Tags
//////////////////////////////////////////////////////////////////////
// DBInstance is database instance name.
DBInstance = stringTagName("db.instance")
// DBStatement is a database statement for the given database type.
// It can be a query or a prepared statement (i.e., before substitution).
DBStatement = stringTagName("db.statement")
// DBType is a database type. For any SQL database, "sql".
// For others, the lower-case database category, e.g. "redis"
DBType = stringTagName("db.type")
// DBUser is a username for accessing database.
DBUser = stringTagName("db.user")
//////////////////////////////////////////////////////////////////////
// Message Bus Tag
//////////////////////////////////////////////////////////////////////
// MessageBusDestination is an address at which messages can be exchanged
MessageBusDestination = stringTagName("message_bus.destination")
//////////////////////////////////////////////////////////////////////
// Error Tag
//////////////////////////////////////////////////////////////////////
// Error indicates that operation represented by the span resulted in an error.
Error = boolTagName("error")
)
// ---
// SpanKindEnum represents common span types
type SpanKindEnum string
type spanKindTagName string
// Set adds a string tag to the `span`
func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
span.SetTag(string(tag), value)
}
type rpcServerOption struct {
clientContext opentracing.SpanContext
}
func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
if r.clientContext != nil {
opentracing.ChildOf(r.clientContext).Apply(o)
}
SpanKindRPCServer.Apply(o)
}
// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
// with `client` representing the metadata for the remote peer Span if available.
// In case client == nil, due to the client not being instrumented, this RPC
// server span will be a root span.
func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
return rpcServerOption{client}
}
// ---
type stringTagName string
// Set adds a string tag to the `span`
func (tag stringTagName) Set(span opentracing.Span, value string) {
span.SetTag(string(tag), value)
}
// ---
type uint32TagName string
// Set adds a uint32 tag to the `span`
func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
span.SetTag(string(tag), value)
}
// ---
type uint16TagName string
// Set adds a uint16 tag to the `span`
func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
span.SetTag(string(tag), value)
}
// ---
type boolTagName string
// Add adds a bool tag to the `span`
func (tag boolTagName) Set(span opentracing.Span, value bool) {
span.SetTag(string(tag), value)
}
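
The typed tag names above exist so callers get value-type checking instead of raw `span.SetTag()` calls. A small sketch of both styles follows (span and tag values are illustrative; it runs against the default no-op tracer, so nothing is reported):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func tagSketch() {
	span := opentracing.StartSpan("example")
	defer span.Finish()

	// Type-checked helpers defined in the ext package above.
	ext.Component.Set(span, "example-component")
	ext.SpanKind.Set(span, ext.SpanKindRPCClientEnum)
	ext.HTTPStatusCode.Set(span, 200)
	ext.Error.Set(span, false)

	// The plain, untyped form is always available as well.
	span.SetTag("custom.tag", "value")
}
```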

View File

@ -0,0 +1,32 @@
package opentracing
var (
globalTracer Tracer = NoopTracer{}
)
// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
// opentracing.Tracer instance) should call SetGlobalTracer as early as
// possible in main(), prior to calling the `StartSpan` global func below.
// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
// (etc) globals are noops.
func SetGlobalTracer(tracer Tracer) {
globalTracer = tracer
}
// GlobalTracer returns the global singleton `Tracer` implementation.
// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
// implementation that drops all data handed to it.
func GlobalTracer() Tracer {
return globalTracer
}
// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
func StartSpan(operationName string, opts ...StartSpanOption) Span {
return globalTracer.StartSpan(operationName, opts...)
}
// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
func InitGlobalTracer(tracer Tracer) {
SetGlobalTracer(tracer)
}

View File

@ -0,0 +1,57 @@
package opentracing
import "golang.org/x/net/context"
type contextKey struct{}
var activeSpanKey = contextKey{}
// ContextWithSpan returns a new `context.Context` that holds a reference to
// `span`'s SpanContext.
func ContextWithSpan(ctx context.Context, span Span) context.Context {
return context.WithValue(ctx, activeSpanKey, span)
}
// SpanFromContext returns the `Span` previously associated with `ctx`, or
// `nil` if no such `Span` could be found.
//
// NOTE: context.Context != SpanContext: the former is Go's intra-process
// context propagation mechanism, and the latter houses OpenTracing's per-Span
// identity and baggage information.
func SpanFromContext(ctx context.Context) Span {
val := ctx.Value(activeSpanKey)
if sp, ok := val.(Span); ok {
return sp
}
return nil
}
// StartSpanFromContext starts and returns a Span with `operationName`, using
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
// found, StartSpanFromContext creates a root (parentless) Span.
//
// The second return value is a context.Context object built around the
// returned Span.
//
// Example usage:
//
// SomeFunction(ctx context.Context, ...) {
// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
// defer sp.Finish()
// ...
// }
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
}
// startSpanFromContextWithTracer is factored out for testing purposes.
func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
var span Span
if parentSpan := SpanFromContext(ctx); parentSpan != nil {
opts = append(opts, ChildOf(parentSpan.Context()))
span = tracer.StartSpan(operationName, opts...)
} else {
span = tracer.StartSpan(operationName, opts...)
}
return span, ContextWithSpan(ctx, span)
}
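
This context helper is what lets the runtime's `ctx` parameter threading produce nested spans automatically. A minimal sketch of the parent/child relationship (function and span names are illustrative):

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

func outer(ctx context.Context) {
	// Starts a span and returns a context that carries it.
	span, ctx := opentracing.StartSpanFromContext(ctx, "outer")
	defer span.Finish()

	inner(ctx) // the inner span becomes a child of "outer"
}

func inner(ctx context.Context) {
	span, _ := opentracing.StartSpanFromContext(ctx, "inner")
	defer span.Finish()
	// ... traced work ...
}
```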

View File

@ -0,0 +1,245 @@
package log
import (
"fmt"
"math"
)
type fieldType int
const (
stringType fieldType = iota
boolType
intType
int32Type
uint32Type
int64Type
uint64Type
float32Type
float64Type
errorType
objectType
lazyLoggerType
)
// Field instances are constructed via LogBool, LogString, and so on.
// Tracing implementations may then handle them via the Field.Marshal
// method.
//
// "heavily influenced by" (i.e., partially stolen from)
// https://github.com/uber-go/zap
type Field struct {
key string
fieldType fieldType
numericVal int64
stringVal string
interfaceVal interface{}
}
// String adds a string-valued key:value pair to a Span.LogFields() record
func String(key, val string) Field {
return Field{
key: key,
fieldType: stringType,
stringVal: val,
}
}
// Bool adds a bool-valued key:value pair to a Span.LogFields() record
func Bool(key string, val bool) Field {
var numericVal int64
if val {
numericVal = 1
}
return Field{
key: key,
fieldType: boolType,
numericVal: numericVal,
}
}
// Int adds an int-valued key:value pair to a Span.LogFields() record
func Int(key string, val int) Field {
return Field{
key: key,
fieldType: intType,
numericVal: int64(val),
}
}
// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
func Int32(key string, val int32) Field {
return Field{
key: key,
fieldType: int32Type,
numericVal: int64(val),
}
}
// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
return Field{
key: key,
fieldType: int64Type,
numericVal: val,
}
}
// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
func Uint32(key string, val uint32) Field {
return Field{
key: key,
fieldType: uint32Type,
numericVal: int64(val),
}
}
// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
func Uint64(key string, val uint64) Field {
return Field{
key: key,
fieldType: uint64Type,
numericVal: int64(val),
}
}
// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
func Float32(key string, val float32) Field {
return Field{
key: key,
fieldType: float32Type,
numericVal: int64(math.Float32bits(val)),
}
}
// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
func Float64(key string, val float64) Field {
return Field{
key: key,
fieldType: float64Type,
numericVal: int64(math.Float64bits(val)),
}
}
// Error adds an error with the key "error" to a Span.LogFields() record
func Error(err error) Field {
return Field{
key: "error",
fieldType: errorType,
interfaceVal: err,
}
}
// Object adds an object-valued key:value pair to a Span.LogFields() record
func Object(key string, obj interface{}) Field {
return Field{
key: key,
fieldType: objectType,
interfaceVal: obj,
}
}
// LazyLogger allows for user-defined, late-bound logging of arbitrary data
type LazyLogger func(fv Encoder)
// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
// implementation will call the LazyLogger function at an indefinite time in
// the future (after Lazy() returns).
func Lazy(ll LazyLogger) Field {
return Field{
fieldType: lazyLoggerType,
interfaceVal: ll,
}
}
// Encoder allows access to the contents of a Field (via a call to
// Field.Marshal).
//
// Tracer implementations typically provide an implementation of Encoder;
// OpenTracing callers typically do not need to concern themselves with it.
type Encoder interface {
EmitString(key, value string)
EmitBool(key string, value bool)
EmitInt(key string, value int)
EmitInt32(key string, value int32)
EmitInt64(key string, value int64)
EmitUint32(key string, value uint32)
EmitUint64(key string, value uint64)
EmitFloat32(key string, value float32)
EmitFloat64(key string, value float64)
EmitObject(key string, value interface{})
EmitLazyLogger(value LazyLogger)
}
// Marshal passes a Field instance through to the appropriate
// field-type-specific method of an Encoder.
func (lf Field) Marshal(visitor Encoder) {
switch lf.fieldType {
case stringType:
visitor.EmitString(lf.key, lf.stringVal)
case boolType:
visitor.EmitBool(lf.key, lf.numericVal != 0)
case intType:
visitor.EmitInt(lf.key, int(lf.numericVal))
case int32Type:
visitor.EmitInt32(lf.key, int32(lf.numericVal))
case int64Type:
visitor.EmitInt64(lf.key, int64(lf.numericVal))
case uint32Type:
visitor.EmitUint32(lf.key, uint32(lf.numericVal))
case uint64Type:
visitor.EmitUint64(lf.key, uint64(lf.numericVal))
case float32Type:
visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
case float64Type:
visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
case errorType:
if err, ok := lf.interfaceVal.(error); ok {
visitor.EmitString(lf.key, err.Error())
} else {
visitor.EmitString(lf.key, "<nil>")
}
case objectType:
visitor.EmitObject(lf.key, lf.interfaceVal)
case lazyLoggerType:
visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
}
}
// Key returns the field's key.
func (lf Field) Key() string {
return lf.key
}
// Value returns the field's value as interface{}.
func (lf Field) Value() interface{} {
switch lf.fieldType {
case stringType:
return lf.stringVal
case boolType:
return lf.numericVal != 0
case intType:
return int(lf.numericVal)
case int32Type:
return int32(lf.numericVal)
case int64Type:
return int64(lf.numericVal)
case uint32Type:
return uint32(lf.numericVal)
case uint64Type:
return uint64(lf.numericVal)
case float32Type:
return math.Float32frombits(uint32(lf.numericVal))
case float64Type:
return math.Float64frombits(uint64(lf.numericVal))
case errorType, objectType, lazyLoggerType:
return lf.interfaceVal
default:
return nil
}
}
// String returns a string representation of the key and value.
func (lf Field) String() string {
return fmt.Sprint(lf.key, ":", lf.Value())
}
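
A short sketch of the structured-logging side: the constructors above build typed Fields that are attached to a span with `LogFields()`. Span and field values are illustrative; with the default no-op tracer nothing is actually reported.

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func logFieldsSketch() {
	span := opentracing.StartSpan("example")
	defer span.Finish()

	span.LogFields(
		log.String("event", "soft error"),
		log.Int("retries", 3),
		log.Bool("recovered", true),
	)
}
```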

View File

@ -0,0 +1,54 @@
package log
import "fmt"
// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
// a la Span.LogFields().
func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
if len(keyValues)%2 != 0 {
return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
}
fields := make([]Field, len(keyValues)/2)
for i := 0; i*2 < len(keyValues); i++ {
key, ok := keyValues[i*2].(string)
if !ok {
return nil, fmt.Errorf(
"non-string key (pair #%d): %T",
i, keyValues[i*2])
}
switch typedVal := keyValues[i*2+1].(type) {
case bool:
fields[i] = Bool(key, typedVal)
case string:
fields[i] = String(key, typedVal)
case int:
fields[i] = Int(key, typedVal)
case int8:
fields[i] = Int32(key, int32(typedVal))
case int16:
fields[i] = Int32(key, int32(typedVal))
case int32:
fields[i] = Int32(key, typedVal)
case int64:
fields[i] = Int64(key, typedVal)
case uint:
fields[i] = Uint64(key, uint64(typedVal))
case uint64:
fields[i] = Uint64(key, typedVal)
case uint8:
fields[i] = Uint32(key, uint32(typedVal))
case uint16:
fields[i] = Uint32(key, uint32(typedVal))
case uint32:
fields[i] = Uint32(key, typedVal)
case float32:
fields[i] = Float32(key, typedVal)
case float64:
fields[i] = Float64(key, typedVal)
default:
// When in doubt, coerce to a string
fields[i] = String(key, fmt.Sprint(typedVal))
}
}
return fields, nil
}
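
A sketch of how a tracer implementation might use the helper above, converting `LogKV`-style alternating key/values into typed Fields (the values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func kvSketch() {
	fields, err := log.InterleavedKVToFields(
		"event", "timeout",
		"waited.millis", 1500,
	)
	if err != nil {
		// Returned for odd-length argument lists or non-string keys.
		fmt.Println(err)
		return
	}
	for _, f := range fields {
		fmt.Println(f.Key(), "=", f.Value())
	}
}
```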

64
vendor/github.com/opentracing/opentracing-go/noop.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package opentracing
import "github.com/opentracing/opentracing-go/log"
// A NoopTracer is a trivial, minimum overhead implementation of Tracer
// for which all operations are no-ops.
//
// The primary use of this implementation is in libraries, such as RPC
// frameworks, that make tracing an optional feature controlled by the
// end user. A no-op implementation allows said libraries to use it
// as the default Tracer and to write instrumentation that does
// not need to keep checking if the tracer instance is nil.
//
// For the same reason, the NoopTracer is the default "global" tracer
// (see GlobalTracer and SetGlobalTracer functions).
//
// WARNING: NoopTracer does not support baggage propagation.
type NoopTracer struct{}
type noopSpan struct{}
type noopSpanContext struct{}
var (
defaultNoopSpanContext = noopSpanContext{}
defaultNoopSpan = noopSpan{}
defaultNoopTracer = NoopTracer{}
)
const (
emptyString = ""
)
// noopSpanContext:
func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
// noopSpan:
func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
func (n noopSpan) BaggageItem(key string) string { return emptyString }
func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
func (n noopSpan) LogFields(fields ...log.Field) {}
func (n noopSpan) LogKV(keyVals ...interface{}) {}
func (n noopSpan) Finish() {}
func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
func (n noopSpan) SetOperationName(operationName string) Span { return n }
func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
func (n noopSpan) LogEvent(event string) {}
func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
func (n noopSpan) Log(data LogData) {}
// StartSpan belongs to the Tracer interface.
func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
return defaultNoopSpan
}
// Inject belongs to the Tracer interface.
func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
return nil
}
// Extract belongs to the Tracer interface.
func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
return nil, ErrSpanContextNotFound
}
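
A tiny sketch of the no-op behaviour described above: until a real tracer is installed with `SetGlobalTracer()`, spans started through the package-level API can be created, tagged, and finished freely, and all data is dropped.

```go
package main

import opentracing "github.com/opentracing/opentracing-go"

func noopSketch() {
	span := opentracing.StartSpan("noop-until-a-tracer-is-installed")
	span.SetTag("ignored", true) // NoopTracer drops all data
	span.LogKV("event", "also ignored")
	span.Finish()
}
```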

View File

@ -0,0 +1,176 @@
package opentracing
import (
"errors"
"net/http"
)
///////////////////////////////////////////////////////////////////////////////
// CORE PROPAGATION INTERFACES:
///////////////////////////////////////////////////////////////////////////////
var (
// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
// Tracer.Extract() is not recognized by the Tracer implementation.
ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
// ErrSpanContextNotFound occurs when the `carrier` passed to
// Tracer.Extract() is valid and uncorrupted but has insufficient
// information to extract a SpanContext.
ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
// operate on a SpanContext which it is not prepared to handle (for
// example, since it was created by a different tracer implementation).
ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
// implementations expect a different type of `carrier` than they are
// given.
ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
// ErrSpanContextCorrupted occurs when the `carrier` passed to
// Tracer.Extract() is of the expected type but is corrupted.
ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
)
///////////////////////////////////////////////////////////////////////////////
// BUILTIN PROPAGATION FORMATS:
///////////////////////////////////////////////////////////////////////////////
// BuiltinFormat is used to demarcate the values within package `opentracing`
// that are intended for use with the Tracer.Inject() and Tracer.Extract()
// methods.
type BuiltinFormat byte
const (
// Binary represents SpanContexts as opaque binary data.
//
// For Tracer.Inject(): the carrier must be an `io.Writer`.
//
// For Tracer.Extract(): the carrier must be an `io.Reader`.
Binary BuiltinFormat = iota
// TextMap represents SpanContexts as key:value string pairs.
//
// Unlike HTTPHeaders, the TextMap format does not restrict the key or
// value character sets in any way.
//
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
//
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
TextMap
// HTTPHeaders represents SpanContexts as HTTP header string pairs.
//
// Unlike TextMap, the HTTPHeaders format requires that the keys and values
// be valid as HTTP headers as-is (i.e., character casing may be unstable
// and special characters are disallowed in keys, values should be
// URL-escaped, etc).
//
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
//
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
//
// See HTTPHeaderCarrier for an implementation of both TextMapWriter
// and TextMapReader that defers to an http.Header instance for storage.
// For example, Inject():
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := span.Tracer().Inject(
// span, opentracing.HTTPHeaders, carrier)
//
// Or Extract():
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// span, err := tracer.Extract(
// opentracing.HTTPHeaders, carrier)
//
HTTPHeaders
)
// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
// it, the caller can encode a SpanContext for propagation as entries in a map
// of unicode strings.
type TextMapWriter interface {
// Set a key:value pair to the carrier. Multiple calls to Set() for the
// same key leads to undefined behavior.
//
// NOTE: The backing store for the TextMapWriter may contain data unrelated
// to SpanContext. As such, Inject() and Extract() implementations that
// call the TextMapWriter and TextMapReader interfaces must agree on a
// prefix or other convention to distinguish their own key:value pairs.
Set(key, val string)
}
// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
// the caller can decode a propagated SpanContext as entries in a map of
// unicode strings.
type TextMapReader interface {
// ForeachKey returns TextMap contents via repeated calls to the `handler`
// function. If any call to `handler` returns a non-nil error, ForeachKey
// terminates and returns that error.
//
// NOTE: The backing store for the TextMapReader may contain data unrelated
// to SpanContext. As such, Inject() and Extract() implementations that
// call the TextMapWriter and TextMapReader interfaces must agree on a
// prefix or other convention to distinguish their own key:value pairs.
//
// The "foreach" callback pattern reduces unnecessary copying in some cases
// and also allows implementations to hold locks while the map is read.
ForeachKey(handler func(key, val string) error) error
}
// TextMapCarrier allows the use of regular map[string]string
// as both TextMapWriter and TextMapReader.
type TextMapCarrier map[string]string
// ForeachKey conforms to the TextMapReader interface.
func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
for k, v := range c {
if err := handler(k, v); err != nil {
return err
}
}
return nil
}
// Set implements Set() of opentracing.TextMapWriter
func (c TextMapCarrier) Set(key, val string) {
c[key] = val
}
// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
//
// Example usage for server side:
//
// carrier := opentracing.HttpHeadersCarrier(httpReq.Header)
// spanContext, err := tracer.Extract(opentracing.HttpHeaders, carrier)
//
// Example usage for client side:
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := tracer.Inject(
// span.Context(),
// opentracing.HttpHeaders,
// carrier)
//
type HTTPHeadersCarrier http.Header
// Set conforms to the TextMapWriter interface.
func (c HTTPHeadersCarrier) Set(key, val string) {
h := http.Header(c)
h.Add(key, val)
}
// ForeachKey conforms to the TextMapReader interface.
func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
for k, vals := range c {
for _, v := range vals {
if err := handler(k, v); err != nil {
return err
}
}
}
return nil
}
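
The carriers above are how a SpanContext crosses process boundaries. The sketch below injects into and extracts from a plain `TextMapCarrier`; with the default no-op tracer, Extract simply reports that no SpanContext was found, which the sketch treats as "no parent".

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func propagationSketch() {
	tracer := opentracing.GlobalTracer()

	span := tracer.StartSpan("client-side")
	defer span.Finish()

	// Inject the span context into a plain map (e.g. message headers).
	carrier := opentracing.TextMapCarrier{}
	if err := tracer.Inject(span.Context(), opentracing.TextMap, carrier); err != nil {
		fmt.Println("inject:", err)
	}

	// On the receiving side, extract it again and use it as the parent.
	parent, err := tracer.Extract(opentracing.TextMap, carrier)
	if err != nil {
		fmt.Println("extract:", err) // NoopTracer: SpanContext not found
		return
	}
	server := tracer.StartSpan("server-side", opentracing.ChildOf(parent))
	server.Finish()
}
```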

185
vendor/github.com/opentracing/opentracing-go/span.go generated vendored Normal file
View File

@ -0,0 +1,185 @@
package opentracing
import (
"time"
"github.com/opentracing/opentracing-go/log"
)
// SpanContext represents Span state that must propagate to descendant Spans and across process
// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
type SpanContext interface {
// ForeachBaggageItem grants access to all baggage items stored in the
// SpanContext.
// The handler function will be called for each baggage key/value pair.
// The ordering of items is not guaranteed.
//
// The bool return value indicates if the handler wants to continue iterating
// through the rest of the baggage items; for example if the handler is trying to
// find some baggage item by pattern matching the name, it can return false
// as soon as the item is found to stop further iterations.
ForeachBaggageItem(handler func(k, v string) bool)
}
// Span represents an active, un-finished span in the OpenTracing system.
//
// Spans are created by the Tracer interface.
type Span interface {
// Sets the end timestamp and finalizes Span state.
//
// With the exception of calls to Context() (which are always allowed),
// Finish() must be the last call made to any span instance, and to do
// otherwise leads to undefined behavior.
Finish()
// FinishWithOptions is like Finish() but with explicit control over
// timestamps and log data.
FinishWithOptions(opts FinishOptions)
// Context() yields the SpanContext for this Span. Note that the return
// value of Context() is still valid after a call to Span.Finish(), as is
// a call to Span.Context() after a call to Span.Finish().
Context() SpanContext
// Sets or changes the operation name.
SetOperationName(operationName string) Span
// Adds a tag to the span.
//
// If there is a pre-existing tag set for `key`, it is overwritten.
//
// Tag values can be numeric types, strings, or bools. The behavior of
// other tag value types is undefined at the OpenTracing level. If a
// tracing system does not know how to handle a particular value type, it
// may ignore the tag, but shall not panic.
SetTag(key string, value interface{}) Span
// LogFields is an efficient and type-checked way to record key:value
// logging data about a Span, though the programming interface is a little
// more verbose than LogKV(). Here's an example:
//
// span.LogFields(
// log.String("event", "soft error"),
// log.String("type", "cache timeout"),
// log.Int("waited.millis", 1500))
//
// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
LogFields(fields ...log.Field)
// LogKV is a concise, readable way to record key:value logging data about
// a Span, though unfortunately this also makes it less efficient and less
// type-safe than LogFields(). Here's an example:
//
// span.LogKV(
// "event", "soft error",
// "type", "cache timeout",
// "waited.millis", 1500)
//
// For LogKV (as opposed to LogFields()), the parameters must appear as
// key-value pairs, like
//
// span.LogKV(key1, val1, key2, val2, key3, val3, ...)
//
// The keys must all be strings. The values may be strings, numeric types,
// bools, Go error instances, or arbitrary structs.
//
// (Note to implementors: consider the log.InterleavedKVToFields() helper)
LogKV(alternatingKeyValues ...interface{})
// SetBaggageItem sets a key:value pair on this Span and its SpanContext
// that also propagates to descendants of this Span.
//
// SetBaggageItem() enables powerful functionality given a full-stack
// opentracing integration (e.g., arbitrary application data from a mobile
// app can make it, transparently, all the way into the depths of a storage
// system), and with it some powerful costs: use this feature with care.
//
// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
// *future* causal descendants of the associated Span.
//
// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
// value is copied into every local *and remote* child of the associated
// Span, and that can add up to a lot of network and cpu overhead.
//
// Returns a reference to this Span for chaining.
SetBaggageItem(restrictedKey, value string) Span
// Gets the value for a baggage item given its key. Returns the empty string
// if the value isn't found in this Span.
BaggageItem(restrictedKey string) string
// Provides access to the Tracer that created this Span.
Tracer() Tracer
// Deprecated: use LogFields or LogKV
LogEvent(event string)
// Deprecated: use LogFields or LogKV
LogEventWithPayload(event string, payload interface{})
// Deprecated: use LogFields or LogKV
Log(data LogData)
}
// LogRecord is data associated with a single Span log. Every LogRecord
// instance must specify at least one Field.
type LogRecord struct {
Timestamp time.Time
Fields []log.Field
}
// FinishOptions allows Span.FinishWithOptions callers to override the finish
// timestamp and provide log data via a bulk interface.
type FinishOptions struct {
// FinishTime overrides the Span's finish time, or implicitly becomes
// time.Now() if FinishTime.IsZero().
//
// FinishTime must resolve to a timestamp that's >= the Span's StartTime
// (per StartSpanOptions).
FinishTime time.Time
// LogRecords allows the caller to specify the contents of many LogFields()
// calls with a single slice. May be nil.
//
// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
// be set explicitly). Also, they must be >= the Span's start timestamp and
// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
// behavior of FinishWithOptions() is undefined.
//
// If specified, the caller hands off ownership of LogRecords at
// FinishWithOptions() invocation time.
//
// If specified, the (deprecated) BulkLogData must be nil or empty.
LogRecords []LogRecord
// BulkLogData is DEPRECATED.
BulkLogData []LogData
}
// LogData is DEPRECATED
type LogData struct {
Timestamp time.Time
Event string
Payload interface{}
}
// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
func (ld *LogData) ToLogRecord() LogRecord {
var literalTimestamp time.Time
if ld.Timestamp.IsZero() {
literalTimestamp = time.Now()
} else {
literalTimestamp = ld.Timestamp
}
rval := LogRecord{
Timestamp: literalTimestamp,
}
if ld.Payload == nil {
rval.Fields = []log.Field{
log.String("event", ld.Event),
}
} else {
rval.Fields = []log.Field{
log.String("event", ld.Event),
log.Object("payload", ld.Payload),
}
}
return rval
}
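
As a rough illustration of the Span API above (operation, tag, and field names are made up for the example), a child span with structured logs and an explicit finish time might look like this:

```
package main

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

// doWork starts a child span under parent, records some logs and a tag,
// and finishes with an explicit timestamp.
func doWork(parent opentracing.Span) {
	span := parent.Tracer().StartSpan(
		"doWork",
		opentracing.ChildOf(parent.Context()),
	)
	span.SetTag("component", "example")

	// Type-checked structured logging.
	span.LogFields(
		log.String("event", "cache miss"),
		log.Int("attempt", 1),
	)
	// The more concise (but less type-safe) key/value form.
	span.LogKV("event", "retry", "waited.millis", 150)

	span.FinishWithOptions(opentracing.FinishOptions{
		FinishTime: time.Now(),
	})
}
```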

305
vendor/github.com/opentracing/opentracing-go/tracer.go generated vendored Normal file
View File

@ -0,0 +1,305 @@
package opentracing
import "time"
// Tracer is a simple, thin interface for Span creation and SpanContext
// propagation.
type Tracer interface {
// Create, start, and return a new Span with the given `operationName` and
// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
// from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
//
// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
// opentracing.FollowsFrom()) becomes the root of its own trace.
//
// Examples:
//
// var tracer opentracing.Tracer = ...
//
// // The root-span case:
// sp := tracer.StartSpan("GetFeed")
//
// // The vanilla child span case:
// sp := tracer.StartSpan(
// "GetFeed",
// opentracing.ChildOf(parentSpan.Context()))
//
// // All the bells and whistles:
// sp := tracer.StartSpan(
// "GetFeed",
// opentracing.ChildOf(parentSpan.Context()),
// opentracing.Tag("user_agent", loggedReq.UserAgent),
// opentracing.StartTime(loggedReq.Timestamp),
// )
//
StartSpan(operationName string, opts ...StartSpanOption) Span
// Inject() takes the `sm` SpanContext instance and injects it for
// propagation within `carrier`. The actual type of `carrier` depends on
// the value of `format`.
//
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
// and each has an expected carrier type.
//
// Other packages may declare their own `format` values, much like the keys
// used by `context.Context` (see
// https://godoc.org/golang.org/x/net/context#WithValue).
//
// Example usage (sans error handling):
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// err := tracer.Inject(
// span.Context(),
// opentracing.HTTPHeaders,
// carrier)
//
// NOTE: All opentracing.Tracer implementations MUST support all
// BuiltinFormats.
//
// Implementations may return opentracing.ErrUnsupportedFormat if `format`
// is not supported by (or not known by) the implementation.
//
// Implementations may return opentracing.ErrInvalidCarrier or any other
// implementation-specific error if the format is supported but injection
// fails anyway.
//
// See Tracer.Extract().
Inject(sm SpanContext, format interface{}, carrier interface{}) error
// Extract() returns a SpanContext instance given `format` and `carrier`.
//
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
// and each has an expected carrier type.
//
// Other packages may declare their own `format` values, much like the keys
// used by `context.Context` (see
// https://godoc.org/golang.org/x/net/context#WithValue).
//
// Example usage (with StartSpan):
//
//
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
//
// // ... assuming the ultimate goal here is to resume the trace with a
// // server-side Span:
// var serverSpan opentracing.Span
// if err == nil {
// span = tracer.StartSpan(
// rpcMethodName, ext.RPCServerOption(clientContext))
// } else {
// span = tracer.StartSpan(rpcMethodName)
// }
//
//
// NOTE: All opentracing.Tracer implementations MUST support all
// BuiltinFormats.
//
// Return values:
// - A successful Extract returns a SpanContext instance and a nil error
// - If there was simply no SpanContext to extract in `carrier`, Extract()
// returns (nil, opentracing.ErrSpanContextNotFound)
// - If `format` is unsupported or unrecognized, Extract() returns (nil,
// opentracing.ErrUnsupportedFormat)
// - If there are more fundamental problems with the `carrier` object,
// Extract() may return opentracing.ErrInvalidCarrier,
// opentracing.ErrSpanContextCorrupted, or implementation-specific
// errors.
//
// See Tracer.Inject().
Extract(format interface{}, carrier interface{}) (SpanContext, error)
}
// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
// mechanism to override the start timestamp, specify Span References, and make
// a single Tag or multiple Tags available at Span start time.
//
// StartSpan() callers should look at the StartSpanOption interface and
// implementations available in this package.
//
// Tracer implementations can convert a slice of `StartSpanOption` instances
// into a `StartSpanOptions` struct like so:
//
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
// sso := opentracing.StartSpanOptions{}
// for _, o := range opts {
// o.Apply(&sso)
// }
// ...
// }
//
type StartSpanOptions struct {
// Zero or more causal references to other Spans (via their SpanContext).
// If empty, start a "root" Span (i.e., start a new trace).
References []SpanReference
// StartTime overrides the Span's start time, or implicitly becomes
// time.Now() if StartTime.IsZero().
StartTime time.Time
// Tags may have zero or more entries; the restrictions on map values are
// identical to those for Span.SetTag(). May be nil.
//
// If specified, the caller hands off ownership of Tags at
// StartSpan() invocation time.
Tags map[string]interface{}
}
// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
//
// StartSpanOption borrows from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type StartSpanOption interface {
Apply(*StartSpanOptions)
}
// SpanReferenceType is an enum type describing different categories of
// relationships between two Spans. If Span-2 refers to Span-1, the
// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
// ChildOfRef means that Span-1 created Span-2.
//
// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
// or Span-2 may be sitting in a distributed queue behind Span-1.
type SpanReferenceType int
const (
// ChildOfRef refers to a parent Span that caused *and* somehow depends
// upon the new child Span. Often (but not always), the parent Span cannot
// finish until the child Span does.
//
// A timing diagram for a ChildOfRef that's blocked on the new Span:
//
// [-Parent Span---------]
// [-Child Span----]
//
// See http://opentracing.io/spec/
//
// See opentracing.ChildOf()
ChildOfRef SpanReferenceType = iota
// FollowsFromRef refers to a parent Span that does not depend in any way
// on the result of the new child Span. For instance, one might use
// FollowsFromRefs to describe pipeline stages separated by queues,
// or a fire-and-forget cache insert at the tail end of a web request.
//
// A FollowsFromRef Span is part of the same logical trace as the new Span:
// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
//
// All of the following could be valid timing diagrams for children that
// "FollowFrom" a parent.
//
// [-Parent Span-] [-Child Span-]
//
//
// [-Parent Span--]
// [-Child Span-]
//
//
// [-Parent Span-]
// [-Child Span-]
//
// See http://opentracing.io/spec/
//
// See opentracing.FollowsFrom()
FollowsFromRef
)
// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
// referenced SpanContext. See the SpanReferenceType documentation for
// supported relationships. If SpanReference is created with
// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
// syntax for starting spans:
//
// sc, _ := tracer.Extract(someFormat, someCarrier)
// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
//
// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
// not add the parent span reference to the options.
type SpanReference struct {
Type SpanReferenceType
ReferencedContext SpanContext
}
// Apply satisfies the StartSpanOption interface.
func (r SpanReference) Apply(o *StartSpanOptions) {
if r.ReferencedContext != nil {
o.References = append(o.References, r)
}
}
// ChildOf returns a StartSpanOption pointing to a dependent parent span.
// If sc == nil, the option has no effect.
//
// See ChildOfRef, SpanReference
func ChildOf(sc SpanContext) SpanReference {
return SpanReference{
Type: ChildOfRef,
ReferencedContext: sc,
}
}
// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
// the child Span but does not directly depend on its result in any way.
// If sc == nil, the option has no effect.
//
// See FollowsFromRef, SpanReference
func FollowsFrom(sc SpanContext) SpanReference {
return SpanReference{
Type: FollowsFromRef,
ReferencedContext: sc,
}
}
// StartTime is a StartSpanOption that sets an explicit start timestamp for the
// new Span.
type StartTime time.Time
// Apply satisfies the StartSpanOption interface.
func (t StartTime) Apply(o *StartSpanOptions) {
o.StartTime = time.Time(t)
}
// Tags are a generic map from an arbitrary string key to an opaque value type.
// The underlying tracing system is responsible for interpreting and
// serializing the values.
type Tags map[string]interface{}
// Apply satisfies the StartSpanOption interface.
func (t Tags) Apply(o *StartSpanOptions) {
if o.Tags == nil {
o.Tags = make(map[string]interface{})
}
for k, v := range t {
o.Tags[k] = v
}
}
// Tag may be passed as a StartSpanOption to add a tag to new spans,
// or its Set method may be used to apply the tag to an existing Span,
// for example:
//
// tracer.StartSpan("opName", Tag{"Key", value})
//
// or
//
// Tag{"key", value}.Set(span)
type Tag struct {
Key string
Value interface{}
}
// Apply satisfies the StartSpanOption interface.
func (t Tag) Apply(o *StartSpanOptions) {
if o.Tags == nil {
o.Tags = make(map[string]interface{})
}
o.Tags[t.Key] = t.Value
}
// Set applies the tag to an existing Span.
func (t Tag) Set(s Span) {
s.SetTag(t.Key, t.Value)
}
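
To tie the Tracer and StartSpanOption pieces together, a small sketch (operation names and tag keys are illustrative) showing a root span, a child span, and the two ways a Tag can be applied:

```
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
)

func traceExample(tracer opentracing.Tracer) {
	// No SpanReference options, so this span starts a new trace.
	root := tracer.StartSpan(
		"GetFeed",
		opentracing.Tag{Key: "user_agent", Value: "example"},
	)
	defer root.Finish()

	// Child span referencing the root via ChildOf.
	child := tracer.StartSpan("fetchItems", opentracing.ChildOf(root.Context()))
	// A Tag can also be applied to an already-started span.
	opentracing.Tag{Key: "cache", Value: "miss"}.Set(child)
	child.Finish()
}
```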

23
vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
View File

@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

269
vendor/github.com/pkg/errors/errors.go generated vendored Normal file
View File

@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required, the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// The causer interface is not exported by this package, but is considered
// part of the stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// The stackTracer interface is not exported by this package, but is considered
// part of the stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}
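
A short, self-contained sketch of the Wrap/Cause workflow described above (the file path is made up):

```
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readConfig wraps any read failure with context and a stack trace.
func readConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrapf(err, "reading config %q failed", path)
	}
	return data, nil
}

func main() {
	if _, err := readConfig("/no/such/file"); err != nil {
		fmt.Printf("%v\n", err)        // wrapped message chain
		fmt.Printf("%+v\n", err)       // messages plus stack trace
		fmt.Println(errors.Cause(err)) // the original *os.PathError
	}
}
```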

178
vendor/github.com/pkg/errors/stack.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until we find two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}
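
The stackTracer interface mentioned in the package documentation is not exported, so callers re-declare it locally; a minimal sketch:

```
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface documented by the package.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom")
	if st, ok := err.(stackTracer); ok {
		// Print each frame as "file:line" using Frame's fmt.Formatter support.
		for _, f := range st.StackTrace() {
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```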

201
vendor/github.com/uber/jaeger-client-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,77 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/opentracing/opentracing-go/log"
"github.com/uber/jaeger-client-go/internal/baggage"
)
// baggageSetter is an actor that can set a baggage value on a Span given certain
// restrictions (e.g. maxValueLength).
type baggageSetter struct {
restrictionManager baggage.RestrictionManager
metrics *Metrics
}
func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
return &baggageSetter{
restrictionManager: restrictionManager,
metrics: metrics,
}
}
// (NB) span should hold the lock before making this call
func (s *baggageSetter) setBaggage(span *Span, key, value string) {
var truncated bool
var prevItem string
restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
if !restriction.KeyAllowed() {
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
s.metrics.BaggageUpdateFailure.Inc(1)
return
}
if len(value) > restriction.MaxValueLength() {
truncated = true
value = value[:restriction.MaxValueLength()]
s.metrics.BaggageTruncate.Inc(1)
}
prevItem = span.context.baggage[key]
s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
span.context = span.context.WithBaggageItem(key, value)
s.metrics.BaggageUpdateSuccess.Inc(1)
}
func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
if !span.context.IsSampled() {
return
}
fields := []log.Field{
log.String("event", "baggage"),
log.String("key", key),
log.String("value", value),
}
if prevItem != "" {
fields = append(fields, log.String("override", "true"))
}
if truncated {
fields = append(fields, log.String("truncated", "true"))
}
if !valid {
fields = append(fields, log.String("invalid", "true"))
}
span.logFieldsNoLocking(fields...)
}
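
The baggageSetter above is internal plumbing; from an application's point of view, baggage is set and read through the Span API (the key and value below are made up for illustration):

```
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
)

func tagRequest(span opentracing.Span, requestID string) {
	// Baggage propagates to all future causal descendants of this span,
	// including spans in downstream processes.
	span.SetBaggageItem("request.id", requestID)

	// Reading it back (returns "" if the key is not present).
	_ = span.BaggageItem("request.id")
}
```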

View File

@ -0,0 +1,372 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/internal/baggage/remote"
throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
"github.com/uber/jaeger-client-go/rpcmetrics"
)
const defaultSamplingProbability = 0.001
// Configuration configures and creates Jaeger Tracer
type Configuration struct {
// ServiceName specifies the service name to use on the tracer.
// Can be provided via environment variable named JAEGER_SERVICE_NAME
ServiceName string `yaml:"serviceName"`
// Disabled can be provided via environment variable named JAEGER_DISABLED
Disabled bool `yaml:"disabled"`
// RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
RPCMetrics bool `yaml:"rpc_metrics"`
// Tags can be provided via environment variable named JAEGER_TAGS
Tags []opentracing.Tag `yaml:"tags"`
Sampler *SamplerConfig `yaml:"sampler"`
Reporter *ReporterConfig `yaml:"reporter"`
Headers *jaeger.HeadersConfig `yaml:"headers"`
BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
Throttler *ThrottlerConfig `yaml:"throttler"`
}
// SamplerConfig allows initializing a non-default sampler. All fields are optional.
type SamplerConfig struct {
// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
// Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
Type string `yaml:"type"`
// Param is a value passed to the sampler.
// Valid values for Param field are:
// - for "const" sampler, 0 or 1 for always false/true respectively
// - for "probabilistic" sampler, a probability between 0 and 1
// - for "rateLimiting" sampler, the number of spans per second
// - for "remote" sampler, param is the same as for "probabilistic"
// and indicates the initial sampling rate before the actual one
// is received from the mothership.
// Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
Param float64 `yaml:"param"`
// SamplingServerURL is the address of jaeger-agent's HTTP sampling server
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
SamplingServerURL string `yaml:"samplingServerURL"`
// MaxOperations is the maximum number of operations that the sampler
// will keep track of. If an operation is not tracked, a default probabilistic
// sampler will be used rather than the per operation specific sampler.
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS
MaxOperations int `yaml:"maxOperations"`
// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
// jaeger-agent for the appropriate sampling strategy.
// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
}
// ReporterConfig configures the reporter. All fields are optional.
type ReporterConfig struct {
// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
// new spans. The queue is continuously drained by a background go-routine, as fast as spans
// can be sent out of process.
// Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
QueueSize int `yaml:"queueSize"`
// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
// It is generally not useful, as it only matters for very low traffic services.
// Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
BufferFlushInterval time.Duration
// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
// for this option to have any effect.
// Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
LogSpans bool `yaml:"logSpans"`
// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
// Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
LocalAgentHostPort string `yaml:"localAgentHostPort"`
}
// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
// certain baggage keys. All fields are optional.
type BaggageRestrictionsConfig struct {
// DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
// manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
// restrictions have been retrieved from jaeger-agent.
DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
// HostPort is the hostPort of jaeger-agent's baggage restrictions server
HostPort string `yaml:"hostPort"`
// RefreshInterval controls how often the baggage restriction manager will poll
// jaeger-agent for the most recent baggage restrictions.
RefreshInterval time.Duration `yaml:"refreshInterval"`
}
// ThrottlerConfig configures the throttler which can be used to throttle the
// rate at which the client may send debug requests.
type ThrottlerConfig struct {
// HostPort of jaeger-agent's credit server.
HostPort string `yaml:"hostPort"`
// RefreshInterval controls how often the throttler will poll jaeger-agent
// for more throttling credits.
RefreshInterval time.Duration `yaml:"refreshInterval"`
// SynchronousInitialization determines whether or not the throttler should
// synchronously fetch credits from the agent when an operation is seen for
// the first time. This should be set to true if the client will be used by
// a short lived service that needs to ensure that credits are fetched
// upfront such that sampling or throttling occurs.
SynchronousInitialization bool `yaml:"synchronousInitialization"`
}
type nullCloser struct{}
func (*nullCloser) Close() error { return nil }
// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
// before shutdown.
//
// Deprecated: use NewTracer() function
func (c Configuration) New(
serviceName string,
options ...Option,
) (opentracing.Tracer, io.Closer, error) {
if serviceName != "" {
c.ServiceName = serviceName
}
return c.NewTracer(options...)
}
// NewTracer returns a new tracer based on the current configuration, using the given options,
// and a closer func that can be used to flush buffers before shutdown.
func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
if c.ServiceName == "" {
return nil, nil, errors.New("no service name provided")
}
if c.Disabled {
return &opentracing.NoopTracer{}, &nullCloser{}, nil
}
opts := applyOptions(options...)
tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
if c.RPCMetrics {
Observer(
rpcmetrics.NewObserver(
opts.metrics.Namespace("jaeger-rpc", map[string]string{"component": "jaeger"}),
rpcmetrics.DefaultNameNormalizer,
),
)(&opts) // adds to c.observers
}
if c.Sampler == nil {
c.Sampler = &SamplerConfig{
Type: jaeger.SamplerTypeRemote,
Param: defaultSamplingProbability,
}
}
if c.Reporter == nil {
c.Reporter = &ReporterConfig{}
}
sampler := opts.sampler
if sampler == nil {
s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
if err != nil {
return nil, nil, err
}
sampler = s
}
reporter := opts.reporter
if reporter == nil {
r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
if err != nil {
return nil, nil, err
}
reporter = r
}
tracerOptions := []jaeger.TracerOption{
jaeger.TracerOptions.Metrics(tracerMetrics),
jaeger.TracerOptions.Logger(opts.logger),
jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
jaeger.TracerOptions.Gen128Bit(opts.gen128Bit),
jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
}
for _, tag := range opts.tags {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
}
for _, tag := range c.Tags {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
}
for _, obs := range opts.observers {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
}
for _, cobs := range opts.contribObservers {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
}
for format, injector := range opts.injectors {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
}
for format, extractor := range opts.extractors {
tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
}
if c.BaggageRestrictions != nil {
mgr := remote.NewRestrictionManager(
c.ServiceName,
remote.Options.Metrics(tracerMetrics),
remote.Options.Logger(opts.logger),
remote.Options.HostPort(c.BaggageRestrictions.HostPort),
remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
remote.Options.DenyBaggageOnInitializationFailure(
c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
),
)
tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
}
if c.Throttler != nil {
debugThrottler := throttler.NewThrottler(
c.ServiceName,
throttler.Options.Metrics(tracerMetrics),
throttler.Options.Logger(opts.logger),
throttler.Options.HostPort(c.Throttler.HostPort),
throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
throttler.Options.SynchronousInitialization(
c.Throttler.SynchronousInitialization,
),
)
tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
}
tracer, closer := jaeger.NewTracer(
c.ServiceName,
sampler,
reporter,
tracerOptions...,
)
return tracer, closer, nil
}
// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
// It returns a closer func that can be used to flush buffers before shutdown.
func (c Configuration) InitGlobalTracer(
serviceName string,
options ...Option,
) (io.Closer, error) {
if c.Disabled {
return &nullCloser{}, nil
}
tracer, closer, err := c.New(serviceName, options...)
if err != nil {
return nil, err
}
opentracing.SetGlobalTracer(tracer)
return closer, nil
}
// NewSampler creates a new sampler based on the configuration
func (sc *SamplerConfig) NewSampler(
serviceName string,
metrics *jaeger.Metrics,
) (jaeger.Sampler, error) {
samplerType := strings.ToLower(sc.Type)
if samplerType == jaeger.SamplerTypeConst {
return jaeger.NewConstSampler(sc.Param != 0), nil
}
if samplerType == jaeger.SamplerTypeProbabilistic {
if sc.Param >= 0 && sc.Param <= 1.0 {
return jaeger.NewProbabilisticSampler(sc.Param)
}
return nil, fmt.Errorf(
"Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1",
sc.Param,
)
}
if samplerType == jaeger.SamplerTypeRateLimiting {
return jaeger.NewRateLimitingSampler(sc.Param), nil
}
if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
sc2 := *sc
sc2.Type = jaeger.SamplerTypeProbabilistic
initSampler, err := sc2.NewSampler(serviceName, nil)
if err != nil {
return nil, err
}
options := []jaeger.SamplerOption{
jaeger.SamplerOptions.Metrics(metrics),
jaeger.SamplerOptions.InitialSampler(initSampler),
jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
}
if sc.MaxOperations != 0 {
options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations))
}
if sc.SamplingRefreshInterval != 0 {
options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
}
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
}
return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
}
// NewReporter instantiates a new reporter that submits spans to tcollector
func (rc *ReporterConfig) NewReporter(
serviceName string,
metrics *jaeger.Metrics,
logger jaeger.Logger,
) (jaeger.Reporter, error) {
sender, err := rc.newTransport()
if err != nil {
return nil, err
}
reporter := jaeger.NewRemoteReporter(
sender,
jaeger.ReporterOptions.QueueSize(rc.QueueSize),
jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
jaeger.ReporterOptions.Logger(logger),
jaeger.ReporterOptions.Metrics(metrics))
if rc.LogSpans && logger != nil {
logger.Infof("Initializing logging reporter\n")
reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
}
return reporter, err
}
func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
}
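
A plausible way to build a tracer from this configuration (the service name, sampler choice, and agent address are assumptions for the example; NewTracer is the non-deprecated constructor shown above):

```
package main

import (
	"io"
	"time"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

// initTracer builds a Jaeger tracer, installs it as the global OpenTracing
// tracer, and returns a closer that flushes buffered spans on shutdown.
func initTracer() (io.Closer, error) {
	cfg := config.Configuration{
		ServiceName: "example-service",
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: 1, // sample every trace
		},
		Reporter: &config.ReporterConfig{
			BufferFlushInterval: time.Second,
			LocalAgentHostPort:  "127.0.0.1:6831",
		},
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		return nil, err
	}
	opentracing.SetGlobalTracer(tracer)
	return closer, nil
}
```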

View File

@ -0,0 +1,205 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"os"
"strconv"
"strings"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/uber/jaeger-client-go"
)
const (
// environment variable names
envServiceName = "JAEGER_SERVICE_NAME"
envDisabled = "JAEGER_DISABLED"
envRPCMetrics = "JAEGER_RPC_METRICS"
envTags = "JAEGER_TAGS"
envSamplerType = "JAEGER_SAMPLER_TYPE"
envSamplerParam = "JAEGER_SAMPLER_PARAM"
envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
envAgentHost = "JAEGER_AGENT_HOST"
envAgentPort = "JAEGER_AGENT_PORT"
)
// FromEnv uses environment variables to set the tracer's Configuration
func FromEnv() (*Configuration, error) {
c := &Configuration{}
if e := os.Getenv(envServiceName); e != "" {
c.ServiceName = e
}
if e := os.Getenv(envRPCMetrics); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
c.RPCMetrics = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
}
}
if e := os.Getenv(envDisabled); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
c.Disabled = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
}
}
if e := os.Getenv(envTags); e != "" {
c.Tags = parseTags(e)
}
if s, err := samplerConfigFromEnv(); err == nil {
c.Sampler = s
} else {
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
}
if r, err := reporterConfigFromEnv(); err == nil {
c.Reporter = r
} else {
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
}
return c, nil
}
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
func samplerConfigFromEnv() (*SamplerConfig, error) {
sc := &SamplerConfig{}
if e := os.Getenv(envSamplerType); e != "" {
sc.Type = e
}
if e := os.Getenv(envSamplerParam); e != "" {
if value, err := strconv.ParseFloat(e, 64); err == nil {
sc.Param = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
}
}
if e := os.Getenv(envSamplerManagerHostPort); e != "" {
sc.SamplingServerURL = e
}
if e := os.Getenv(envSamplerMaxOperations); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
sc.MaxOperations = int(value)
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
}
}
if e := os.Getenv(envSamplerRefreshInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
sc.SamplingRefreshInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
}
}
return sc, nil
}
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
func reporterConfigFromEnv() (*ReporterConfig, error) {
rc := &ReporterConfig{}
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
rc.QueueSize = int(value)
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
}
}
if e := os.Getenv(envReporterFlushInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
rc.BufferFlushInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
}
}
if e := os.Getenv(envReporterLogSpans); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
rc.LogSpans = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
}
}
host := jaeger.DefaultUDPSpanServerHost
if e := os.Getenv(envAgentHost); e != "" {
host = e
}
port := jaeger.DefaultUDPSpanServerPort
if e := os.Getenv(envAgentPort); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
port = int(value)
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
}
}
// The side effect of this is that we build the default value even if none of
// the env vars were explicitly set.
rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
return rc, nil
}
// parseTags parses the given string into a collection of Tags.
// Spec for this value:
// - comma separated list of key=value
// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
// is an environment variable and `defaultValue` is the value to use in case the env var is not set
func parseTags(sTags string) []opentracing.Tag {
pairs := strings.Split(sTags, ",")
tags := make([]opentracing.Tag, 0)
for _, p := range pairs {
kv := strings.SplitN(p, "=", 2)
k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
ed := strings.SplitN(v[2:len(v)-1], ":", 2)
e, d := ed[0], ed[1]
v = os.Getenv(e)
if v == "" && d != "" {
v = d
}
}
tag := opentracing.Tag{Key: k, Value: v}
tags = append(tags, tag)
}
return tags
}
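
For reference, a minimal sketch of how an application could drive this environment-based configuration. It is hedged: the `JAEGER_*` variable names and the `kata-runtime` service name are assumptions (the env var constants are defined outside this hunk), and `Configuration.NewTracer` is also defined elsewhere in this package:

```go
package main

import (
	"io"
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

// initTracer builds a tracer purely from the process environment, e.g.
//   JAEGER_SERVICE_NAME=kata-runtime            (assumed variable names)
//   JAEGER_SAMPLER_TYPE=const JAEGER_SAMPLER_PARAM=1
//   JAEGER_TAGS=pod=${POD_NAME:unknown},cluster=dev
func initTracer() (io.Closer, error) {
	cfg, err := config.FromEnv()
	if err != nil {
		return nil, err
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		return nil, err
	}
	opentracing.SetGlobalTracer(tracer)
	return closer, nil
}

func main() {
	closer, err := initTracer()
	if err != nil {
		log.Fatalf("tracing setup failed: %v", err)
	}
	defer closer.Close()
}
```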


@ -0,0 +1,140 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-lib/metrics"
"github.com/uber/jaeger-client-go"
)
// Option is a function that sets some option on the client.
type Option func(c *Options)
// Options control behavior of the client.
type Options struct {
metrics metrics.Factory
logger jaeger.Logger
reporter jaeger.Reporter
sampler jaeger.Sampler
contribObservers []jaeger.ContribObserver
observers []jaeger.Observer
gen128Bit bool
zipkinSharedRPCSpan bool
tags []opentracing.Tag
injectors map[interface{}]jaeger.Injector
extractors map[interface{}]jaeger.Extractor
}
// Metrics creates an Option that initializes Metrics in the tracer,
// which is used to emit statistics about spans.
func Metrics(factory metrics.Factory) Option {
return func(c *Options) {
c.metrics = factory
}
}
// Logger can be provided to log Reporter errors, as well as to log spans
// if Reporter.LogSpans is set to true.
func Logger(logger jaeger.Logger) Option {
return func(c *Options) {
c.logger = logger
}
}
// Reporter can be provided explicitly to override the configuration.
// Useful for testing, e.g. by passing InMemoryReporter.
func Reporter(reporter jaeger.Reporter) Option {
return func(c *Options) {
c.reporter = reporter
}
}
// Sampler can be provided explicitly to override the configuration.
func Sampler(sampler jaeger.Sampler) Option {
return func(c *Options) {
c.sampler = sampler
}
}
// Observer can be registered with the Tracer to receive notifications about new Spans.
func Observer(observer jaeger.Observer) Option {
return func(c *Options) {
c.observers = append(c.observers, observer)
}
}
// ContribObserver can be registered with the Tracer to receive notifications
// about new spans.
func ContribObserver(observer jaeger.ContribObserver) Option {
return func(c *Options) {
c.contribObservers = append(c.contribObservers, observer)
}
}
// Gen128Bit specifies whether to generate 128bit trace IDs.
func Gen128Bit(gen128Bit bool) Option {
return func(c *Options) {
c.gen128Bit = gen128Bit
}
}
// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
// and server spans a la zipkin. If false, client and server spans will be assigned
// different IDs.
func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
return func(c *Options) {
c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
}
}
// Tag creates an option that adds a tracer-level tag.
func Tag(key string, value interface{}) Option {
return func(c *Options) {
c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
}
}
// Injector registers an Injector with the given format.
func Injector(format interface{}, injector jaeger.Injector) Option {
return func(c *Options) {
c.injectors[format] = injector
}
}
// Extractor registers an Extractor with the given format.
func Extractor(format interface{}, extractor jaeger.Extractor) Option {
return func(c *Options) {
c.extractors[format] = extractor
}
}
func applyOptions(options ...Option) Options {
opts := Options{
injectors: make(map[interface{}]jaeger.Injector),
extractors: make(map[interface{}]jaeger.Extractor),
}
for _, option := range options {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = metrics.NullFactory
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
return opts
}
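
A hedged sketch of how these functional options might be passed through `Configuration.NewTracer` (defined elsewhere in this package, not in this hunk); the service name, version tag and other values are purely illustrative:

```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := &config.Configuration{ServiceName: "kata-runtime"} // illustrative name
	_, closer, err := cfg.NewTracer(
		config.Logger(jaeger.StdLogger), // surface reporter errors via the stdlib log package
		config.Gen128Bit(true),          // emit 128-bit trace IDs
		config.Tag("version", "1.0.0"),  // tracer-level tag attached to every span
	)
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()
}
```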

vendor/github.com/uber/jaeger-client-go/constants.go generated vendored Normal file

@ -0,0 +1,85 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
JaegerClientVersion = "Go-2.14.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
// if found in the carrier, forces the trace to be sampled as "debug" trace.
// The value of the header is recorded as the tag on the root span, so that the
// trace can be found in the UI using this value as a correlation ID.
JaegerDebugHeader = "jaeger-debug-id"
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
// a root span does not exist.
JaegerBaggageHeader = "jaeger-baggage"
// TracerHostnameTagKey used to report host name of the process.
TracerHostnameTagKey = "hostname"
// TracerIPTagKey used to report ip of the process.
TracerIPTagKey = "ip"
// TracerUUIDTagKey used to report UUID of the client process.
TracerUUIDTagKey = "client-uuid"
// SamplerTypeTagKey reports which sampler was used on the root span.
SamplerTypeTagKey = "sampler.type"
// SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
SamplerParamTagKey = "sampler.param"
// TraceContextHeaderName is the http header name used to propagate tracing context.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceContextHeaderName = "uber-trace-id"
// TracerStateHeaderName is deprecated.
// Deprecated: use TraceContextHeaderName
TracerStateHeaderName = TraceContextHeaderName
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceBaggageHeaderPrefix = "uberctx-"
// SamplerTypeConst is the type of sampler that always makes the same decision.
SamplerTypeConst = "const"
// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
SamplerTypeRemote = "remote"
// SamplerTypeProbabilistic is the type of sampler that samples traces
// with a certain fixed probability.
SamplerTypeProbabilistic = "probabilistic"
// SamplerTypeRateLimiting is the type of sampler that samples
// only up to a fixed number of traces per second.
SamplerTypeRateLimiting = "ratelimiting"
// SamplerTypeLowerBound is the type of sampler that samples
// at least a fixed number of traces per second.
SamplerTypeLowerBound = "lowerbound"
// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
DefaultUDPSpanServerHost = "localhost"
// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
DefaultUDPSpanServerPort = 6831
)
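
A sketch of putting two of these constants to work when propagating context over HTTP; `opentracing.HTTPHeadersCarrier` and `opentracing.HTTPHeaders` come from the opentracing-go package, the package name and ticket value below are illustrative:

```go
package tracing // hypothetical package, for illustration only

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go"
)

// injectHeaders adds the uber-trace-id header for the current span and sets
// JaegerDebugHeader so the downstream trace is force-sampled as a "debug"
// trace, tagged with the given correlation ID.
func injectHeaders(tracer opentracing.Tracer, span opentracing.Span, req *http.Request) error {
	req.Header.Set(jaeger.JaegerDebugHeader, "support-ticket-1234") // illustrative correlation ID
	return tracer.Inject(
		span.Context(),
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(req.Header),
	)
}
```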

vendor/github.com/uber/jaeger-client-go/context.go generated vendored Normal file

@ -0,0 +1,258 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"errors"
"fmt"
"strconv"
"strings"
)
const (
flagSampled = byte(1)
flagDebug = byte(2)
)
var (
errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
errMalformedTracerStateString = errors.New("String does not match tracer state format")
emptyContext = SpanContext{}
)
// TraceID represents unique 128bit identifier of a trace
type TraceID struct {
High, Low uint64
}
// SpanID represents unique 64bit identifier of a span
type SpanID uint64
// SpanContext represents propagated span identity and state
type SpanContext struct {
// traceID represents globally unique ID of the trace.
// Usually generated as a random number.
traceID TraceID
// spanID represents span ID that must be unique within its trace,
// but does not have to be globally unique.
spanID SpanID
// parentID refers to the ID of the parent span.
// Should be 0 if the current span is a root span.
parentID SpanID
// flags is a bitmap containing such bits as 'sampled' and 'debug'.
flags byte
// Distributed Context baggage. This is a snapshot in time.
baggage map[string]string
// debugID can be set to some correlation ID when the context is being
// extracted from a TextMap carrier.
//
// See JaegerDebugHeader in constants.go
debugID string
}
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
for k, v := range c.baggage {
if !handler(k, v) {
break
}
}
}
// IsSampled returns whether this trace was chosen for permanent storage
// by the sampling mechanism of the tracer.
func (c SpanContext) IsSampled() bool {
return (c.flags & flagSampled) == flagSampled
}
// IsDebug indicates whether sampling was explicitly requested by the service.
func (c SpanContext) IsDebug() bool {
return (c.flags & flagDebug) == flagDebug
}
// IsValid indicates whether this context actually represents a valid trace.
func (c SpanContext) IsValid() bool {
return c.traceID.IsValid() && c.spanID != 0
}
func (c SpanContext) String() string {
if c.traceID.High == 0 {
return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
}
return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
}
// ContextFromString reconstructs the Context encoded in a string
func ContextFromString(value string) (SpanContext, error) {
var context SpanContext
if value == "" {
return emptyContext, errEmptyTracerStateString
}
parts := strings.Split(value, ":")
if len(parts) != 4 {
return emptyContext, errMalformedTracerStateString
}
var err error
if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
return emptyContext, err
}
if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
return emptyContext, err
}
if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
return emptyContext, err
}
flags, err := strconv.ParseUint(parts[3], 10, 8)
if err != nil {
return emptyContext, err
}
context.flags = byte(flags)
return context, nil
}
// TraceID returns the trace ID of this span context
func (c SpanContext) TraceID() TraceID {
return c.traceID
}
// SpanID returns the span ID of this span context
func (c SpanContext) SpanID() SpanID {
return c.spanID
}
// ParentID returns the parent span ID of this span context
func (c SpanContext) ParentID() SpanID {
return c.parentID
}
// NewSpanContext creates a new instance of SpanContext
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
flags := byte(0)
if sampled {
flags = flagSampled
}
return SpanContext{
traceID: traceID,
spanID: spanID,
parentID: parentID,
flags: flags,
baggage: baggage}
}
// CopyFrom copies data from ctx into this context, including span identity and baggage.
// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
func (c *SpanContext) CopyFrom(ctx *SpanContext) {
c.traceID = ctx.traceID
c.spanID = ctx.spanID
c.parentID = ctx.parentID
c.flags = ctx.flags
if l := len(ctx.baggage); l > 0 {
c.baggage = make(map[string]string, l)
for k, v := range ctx.baggage {
c.baggage[k] = v
}
} else {
c.baggage = nil
}
}
// WithBaggageItem creates a new context with an extra baggage item.
func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
var newBaggage map[string]string
if c.baggage == nil {
newBaggage = map[string]string{key: value}
} else {
newBaggage = make(map[string]string, len(c.baggage)+1)
for k, v := range c.baggage {
newBaggage[k] = v
}
newBaggage[key] = value
}
// Use positional parameters so the compiler will help catch new fields.
return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
}
// isDebugIDContainerOnly returns true when the instance of the context is only
// used to return the debug/correlation ID from extract() method. This happens
// in the situation when "jaeger-debug-id" header is passed in the carrier to
// the extract() method, but the request otherwise has no span context in it.
// Previously this would've returned opentracing.ErrSpanContextNotFound from the
// extract method, but now it returns a dummy context with only debugID filled in.
//
// See JaegerDebugHeader in constants.go
// See textMapPropagator#Extract
func (c *SpanContext) isDebugIDContainerOnly() bool {
return !c.traceID.IsValid() && c.debugID != ""
}
// ------- TraceID -------
func (t TraceID) String() string {
if t.High == 0 {
return fmt.Sprintf("%x", t.Low)
}
return fmt.Sprintf("%x%016x", t.High, t.Low)
}
// TraceIDFromString creates a TraceID from a hexadecimal string
func TraceIDFromString(s string) (TraceID, error) {
var hi, lo uint64
var err error
if len(s) > 32 {
return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
} else if len(s) > 16 {
hiLen := len(s) - 16
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
return TraceID{}, err
}
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
return TraceID{}, err
}
} else {
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
return TraceID{}, err
}
}
return TraceID{High: hi, Low: lo}, nil
}
// IsValid checks if the trace ID is valid, i.e. not zero.
func (t TraceID) IsValid() bool {
return t.High != 0 || t.Low != 0
}
// ------- SpanID -------
func (s SpanID) String() string {
return fmt.Sprintf("%x", uint64(s))
}
// SpanIDFromString creates a SpanID from a hexadecimal string
func SpanIDFromString(s string) (SpanID, error) {
if len(s) > 16 {
return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
}
id, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return SpanID(0), err
}
return SpanID(id), nil
}
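
The wire format produced and parsed here is `<trace-id>:<span-id>:<parent-id>:<flags>` (IDs in hex, flags as a small decimal). A quick round-trip sketch using the functions above:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func main() {
	ctx, err := jaeger.ContextFromString("abc123:2:1:1")
	if err != nil {
		panic(err)
	}
	// TraceID, SpanID and ParentID are recovered from the hex fields; flag 1
	// means the trace was sampled.
	fmt.Println(ctx.TraceID(), ctx.SpanID(), ctx.ParentID(), ctx.IsSampled()) // abc123 2 1 true
	fmt.Println(ctx.String())                                                 // abc123:2:1:1
}
```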


@ -0,0 +1,56 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
opentracing "github.com/opentracing/opentracing-go"
)
// ContribObserver can be registered with the Tracer to receive notifications
// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
type ContribObserver interface {
// Create and return a span observer. Called when a span starts.
// If the Observer is not interested in the given span, it must return (nil, false).
// E.g.:
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
// var sp opentracing.Span
// sso := opentracing.StartSpanOptions{}
// if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok {
// // we have a valid SpanObserver
// }
// ...
// }
OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
}
// ContribSpanObserver is created by the Observer and receives notifications
// about other Span events. This interface is meant to match
// github.com/opentracing-contrib/go-observer, via duck typing, without
// directly importing the go-observer package.
type ContribSpanObserver interface {
OnSetOperationName(operationName string)
OnSetTag(key string, value interface{})
OnFinish(options opentracing.FinishOptions)
}
// wrapper observer for the old observers (see observer.go)
type oldObserver struct {
obs Observer
}
func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
spanObserver := o.obs.OnStartSpan(operationName, options)
return spanObserver, spanObserver != nil
}

vendor/github.com/uber/jaeger-client-go/doc.go generated vendored Normal file

@ -0,0 +1,24 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
It currently uses a Zipkin-compatible data model and can be directly
integrated with a Zipkin backend (http://zipkin.io).
For integration instructions please refer to the README:
https://github.com/uber/jaeger-client-go/blob/master/README.md
*/
package jaeger

vendor/github.com/uber/jaeger-client-go/header.go generated vendored Normal file

@ -0,0 +1,64 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
// HeadersConfig contains the values for the header keys that Jaeger will use.
// These values may be either custom or default depending on whether custom
// values were provided via a configuration.
type HeadersConfig struct {
// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
// if found in the carrier, forces the trace to be sampled as "debug" trace.
// The value of the header is recorded as the tag on the root span, so that the
// trace can be found in the UI using this value as a correlation ID.
JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
// a root span does not exist.
JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
// TraceContextHeaderName is the http header name used to propagate tracing context.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
// This must be in lower-case to avoid mismatches when decoding incoming headers.
TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
}
func (c *HeadersConfig) applyDefaults() *HeadersConfig {
if c.JaegerBaggageHeader == "" {
c.JaegerBaggageHeader = JaegerBaggageHeader
}
if c.JaegerDebugHeader == "" {
c.JaegerDebugHeader = JaegerDebugHeader
}
if c.TraceBaggageHeaderPrefix == "" {
c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
}
if c.TraceContextHeaderName == "" {
c.TraceContextHeaderName = TraceContextHeaderName
}
return c
}
func getDefaultHeadersConfig() *HeadersConfig {
return &HeadersConfig{
JaegerDebugHeader: JaegerDebugHeader,
JaegerBaggageHeader: JaegerBaggageHeader,
TraceContextHeaderName: TraceContextHeaderName,
TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
}
}


@ -0,0 +1,101 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"time"
"github.com/uber/jaeger-client-go"
)
const (
defaultMaxValueLength = 2048
defaultRefreshInterval = time.Minute
defaultHostPort = "localhost:5778"
)
// Option is a function that sets some option on the RestrictionManager
type Option func(options *options)
// Options is a factory for all available options
var Options options
type options struct {
denyBaggageOnInitializationFailure bool
metrics *jaeger.Metrics
logger jaeger.Logger
hostPort string
refreshInterval time.Duration
}
// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
// restrictions have been retrieved from agent.
// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
// restrictions have been retrieved from agent.
func (options) DenyBaggageOnInitializationFailure(b bool) Option {
return func(o *options) {
o.denyBaggageOnInitializationFailure = b
}
}
// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
func (options) Metrics(m *jaeger.Metrics) Option {
return func(o *options) {
o.metrics = m
}
}
// Logger creates an Option that sets the logger used by the RestrictionManager.
func (options) Logger(logger jaeger.Logger) Option {
return func(o *options) {
o.logger = logger
}
}
// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
func (options) HostPort(hostPort string) Option {
return func(o *options) {
o.hostPort = hostPort
}
}
// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
// the baggage restrictions.
func (options) RefreshInterval(refreshInterval time.Duration) Option {
return func(o *options) {
o.refreshInterval = refreshInterval
}
}
func applyOptions(o ...Option) options {
opts := options{}
for _, option := range o {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = jaeger.NewNullMetrics()
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
if opts.hostPort == "" {
opts.hostPort = defaultHostPort
}
if opts.refreshInterval == 0 {
opts.refreshInterval = defaultRefreshInterval
}
return opts
}


@ -0,0 +1,157 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"fmt"
"net/url"
"sync"
"time"
"github.com/uber/jaeger-client-go/internal/baggage"
thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
"github.com/uber/jaeger-client-go/utils"
)
type httpBaggageRestrictionManagerProxy struct {
url string
}
func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
v := url.Values{}
v.Set("service", serviceName)
return &httpBaggageRestrictionManagerProxy{
url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
}
}
func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) {
var out []*thrift.BaggageRestriction
if err := utils.GetJSON(s.url, &out); err != nil {
return nil, err
}
return out, nil
}
// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
type RestrictionManager struct {
options
mux sync.RWMutex
serviceName string
restrictions map[string]*baggage.Restriction
thriftProxy thrift.BaggageRestrictionManager
pollStopped sync.WaitGroup
stopPoll chan struct{}
invalidRestriction *baggage.Restriction
validRestriction *baggage.Restriction
// Determines if the manager has successfully retrieved baggage restrictions from agent
initialized bool
}
// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
// baggage restrictions.
func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
// TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
// restrictionsMap will need to exist per service
opts := applyOptions(options...)
m := &RestrictionManager{
serviceName: serviceName,
options: opts,
restrictions: make(map[string]*baggage.Restriction),
thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
stopPoll: make(chan struct{}),
invalidRestriction: baggage.NewRestriction(false, 0),
validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
}
m.pollStopped.Add(1)
go m.pollManager()
return m
}
// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
func (m *RestrictionManager) isReady() bool {
m.mux.RLock()
defer m.mux.RUnlock()
return m.initialized
}
// GetRestriction implements RestrictionManager#GetRestriction.
func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
m.mux.RLock()
defer m.mux.RUnlock()
if !m.initialized {
if m.denyBaggageOnInitializationFailure {
return m.invalidRestriction
}
return m.validRestriction
}
if restriction, ok := m.restrictions[key]; ok {
return restriction
}
return m.invalidRestriction
}
// Close stops remote polling and closes the RemoteRestrictionManager.
func (m *RestrictionManager) Close() error {
close(m.stopPoll)
m.pollStopped.Wait()
return nil
}
func (m *RestrictionManager) pollManager() {
defer m.pollStopped.Done()
// attempt to initialize baggage restrictions
if err := m.updateRestrictions(); err != nil {
m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
}
ticker := time.NewTicker(m.refreshInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := m.updateRestrictions(); err != nil {
m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
}
case <-m.stopPoll:
return
}
}
}
func (m *RestrictionManager) updateRestrictions() error {
restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName)
if err != nil {
m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
return err
}
newRestrictions := m.parseRestrictions(restrictions)
m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
m.mux.Lock()
defer m.mux.Unlock()
m.initialized = true
m.restrictions = newRestrictions
return nil
}
func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
setters := make(map[string]*baggage.Restriction, len(restrictions))
for _, restriction := range restrictions {
setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
}
return setters
}
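
Since this package lives under `internal/`, only the client itself can construct a `RestrictionManager`; the sketch below (which would have to live inside the package, e.g. in a test file) is purely illustrative of that wiring, with the service name and agent address as assumptions:

```go
package remote

import "time"

func exampleRestrictionManager() {
	mgr := NewRestrictionManager(
		"kata-runtime", // illustrative service name
		Options.HostPort("localhost:5778"),
		Options.RefreshInterval(time.Minute),
		Options.DenyBaggageOnInitializationFailure(true),
	)
	defer mgr.Close()

	// Until the first successful poll, GetRestriction returns either the
	// "deny all" or the "allow all" restriction, depending on the option above.
	r := mgr.GetRestriction("kata-runtime", "customer-id")
	if r.KeyAllowed() {
		_ = r.MaxValueLength() // baggage values longer than this are truncated
	}
}
```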


@ -0,0 +1,71 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage
const (
defaultMaxValueLength = 2048
)
// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
type Restriction struct {
keyAllowed bool
maxValueLength int
}
// NewRestriction returns a new Restriction.
func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
return &Restriction{
keyAllowed: keyAllowed,
maxValueLength: maxValueLength,
}
}
// KeyAllowed returns whether the baggage key for this restriction is allowed.
func (r *Restriction) KeyAllowed() bool {
return r.keyAllowed
}
// MaxValueLength returns the max length for the baggage value.
func (r *Restriction) MaxValueLength() int {
return r.maxValueLength
}
// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
// will return a Restriction for a specific baggage key which will determine whether the baggage
// key is allowed for the current service and any other applicable restrictions on the baggage
// value.
type RestrictionManager interface {
GetRestriction(service, key string) *Restriction
}
// DefaultRestrictionManager allows any baggage key.
type DefaultRestrictionManager struct {
defaultRestriction *Restriction
}
// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
if maxValueLength == 0 {
maxValueLength = defaultMaxValueLength
}
return &DefaultRestrictionManager{
defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
}
}
// GetRestriction implements RestrictionManager#GetRestriction.
func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
return m.defaultRestriction
}


@ -0,0 +1,81 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spanlog
import (
"encoding/json"
"fmt"
"github.com/opentracing/opentracing-go/log"
)
type fieldsAsMap map[string]string
// MaterializeWithJSON converts log Fields into JSON string
// TODO refactor into pluggable materializer
func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
fields := fieldsAsMap(make(map[string]string, len(logFields)))
for _, field := range logFields {
field.Marshal(fields)
}
if event, ok := fields["event"]; ok && len(fields) == 1 {
return []byte(event), nil
}
return json.Marshal(fields)
}
func (ml fieldsAsMap) EmitString(key, value string) {
ml[key] = value
}
func (ml fieldsAsMap) EmitBool(key string, value bool) {
ml[key] = fmt.Sprintf("%t", value)
}
func (ml fieldsAsMap) EmitInt(key string, value int) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt32(key string, value int32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitInt64(key string, value int64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
ml[key] = fmt.Sprintf("%d", value)
}
func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
ml[key] = fmt.Sprintf("%f", value)
}
func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
ml[key] = fmt.Sprintf("%+v", value)
}
func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
value(ml)
}
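
A small illustration of the materializer's behaviour (again, this package is internal, so the snippet would only compile alongside it); note the special case where a lone "event" field is emitted as a bare string rather than JSON:

```go
package spanlog

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func exampleMaterialize() {
	data, _ := MaterializeWithJSON([]log.Field{
		log.String("event", "retrying"),
		log.Int("attempt", 3),
	})
	fmt.Println(string(data)) // {"attempt":"3","event":"retrying"}

	only, _ := MaterializeWithJSON([]log.Field{log.String("event", "timeout")})
	fmt.Println(string(only)) // timeout
}
```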


@ -0,0 +1,99 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"time"
"github.com/uber/jaeger-client-go"
)
const (
defaultHostPort = "localhost:5778"
defaultRefreshInterval = time.Second * 5
)
// Option is a function that sets some option on the Throttler
type Option func(options *options)
// Options is a factory for all available options
var Options options
type options struct {
metrics *jaeger.Metrics
logger jaeger.Logger
hostPort string
refreshInterval time.Duration
synchronousInitialization bool
}
// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
func (options) Metrics(m *jaeger.Metrics) Option {
return func(o *options) {
o.metrics = m
}
}
// Logger creates an Option that sets the logger used by the Throttler.
func (options) Logger(logger jaeger.Logger) Option {
return func(o *options) {
o.logger = logger
}
}
// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
func (options) HostPort(hostPort string) Option {
return func(o *options) {
o.hostPort = hostPort
}
}
// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
// credits.
func (options) RefreshInterval(refreshInterval time.Duration) Option {
return func(o *options) {
o.refreshInterval = refreshInterval
}
}
// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
// fetch credits from the agent when an operation is seen for the first time. This should be set to true
// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
// such that sampling or throttling occurs.
func (options) SynchronousInitialization(b bool) Option {
return func(o *options) {
o.synchronousInitialization = b
}
}
func applyOptions(o ...Option) options {
opts := options{}
for _, option := range o {
option(&opts)
}
if opts.metrics == nil {
opts.metrics = jaeger.NewNullMetrics()
}
if opts.logger == nil {
opts.logger = jaeger.NullLogger
}
if opts.hostPort == "" {
opts.hostPort = defaultHostPort
}
if opts.refreshInterval == 0 {
opts.refreshInterval = defaultRefreshInterval
}
return opts
}


@ -0,0 +1,216 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"fmt"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/utils"
)
const (
// minimumCredits is the minimum amount of credits necessary to not be throttled.
// i.e. if currentCredits >= minimumCredits, then the operation will not be throttled.
minimumCredits = 1.0
)
var (
errorUUIDNotSet = errors.New("Throttler UUID must be set")
)
type operationBalance struct {
Operation string `json:"operation"`
Balance float64 `json:"balance"`
}
type creditResponse struct {
Balances []operationBalance `json:"balances"`
}
type httpCreditManagerProxy struct {
hostPort string
}
func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
return &httpCreditManagerProxy{
hostPort: hostPort,
}
}
// N.B. Operations list must not be empty.
func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
params := url.Values{}
params.Set("service", serviceName)
params.Set("uuid", uuid)
for _, op := range operations {
params.Add("operations", op)
}
var resp creditResponse
if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
return nil, errors.Wrap(err, "Failed to receive credits from agent")
}
return &resp, nil
}
// Throttler retrieves credits from agent and uses it to throttle operations.
type Throttler struct {
options
mux sync.RWMutex
service string
uuid atomic.Value
creditManager *httpCreditManagerProxy
credits map[string]float64 // map of operation->credits
close chan struct{}
stopped sync.WaitGroup
}
// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
// the service.
func NewThrottler(service string, options ...Option) *Throttler {
opts := applyOptions(options...)
creditManager := newHTTPCreditManagerProxy(opts.hostPort)
t := &Throttler{
options: opts,
creditManager: creditManager,
service: service,
credits: make(map[string]float64),
close: make(chan struct{}),
}
t.stopped.Add(1)
go t.pollManager()
return t
}
// IsAllowed implements Throttler#IsAllowed.
func (t *Throttler) IsAllowed(operation string) bool {
t.mux.Lock()
defer t.mux.Unlock()
value, ok := t.credits[operation]
if !ok || value == 0 {
if !ok {
// NOTE: This appears to be a no-op at first glance, but it stores
// the operation key in the map. Necessary for functionality of
// Throttler#operations method.
t.credits[operation] = 0
}
if !t.synchronousInitialization {
t.metrics.ThrottledDebugSpans.Inc(1)
return false
}
// If it is the first time this operation is being checked, synchronously fetch
// the credits.
credits, err := t.fetchCredits([]string{operation})
if err != nil {
// Failed to receive credits from agent, try again next time
t.logger.Error("Failed to fetch credits: " + err.Error())
return false
}
if len(credits.Balances) == 0 {
// This shouldn't happen but just in case
return false
}
for _, opBalance := range credits.Balances {
t.credits[opBalance.Operation] += opBalance.Balance
}
}
return t.isAllowed(operation)
}
// Close stops the throttler from fetching credits from remote.
func (t *Throttler) Close() error {
close(t.close)
t.stopped.Wait()
return nil
}
// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
// requests are made.
func (t *Throttler) SetProcess(process jaeger.Process) {
if process.UUID != "" {
t.uuid.Store(process.UUID)
}
}
// N.B. This function must be called with the Write Lock
func (t *Throttler) isAllowed(operation string) bool {
credits := t.credits[operation]
if credits < minimumCredits {
t.metrics.ThrottledDebugSpans.Inc(1)
return false
}
t.credits[operation] = credits - minimumCredits
return true
}
func (t *Throttler) pollManager() {
defer t.stopped.Done()
ticker := time.NewTicker(t.refreshInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
t.refreshCredits()
case <-t.close:
return
}
}
}
func (t *Throttler) operations() []string {
t.mux.RLock()
defer t.mux.RUnlock()
operations := make([]string, 0, len(t.credits))
for op := range t.credits {
operations = append(operations, op)
}
return operations
}
func (t *Throttler) refreshCredits() {
operations := t.operations()
if len(operations) == 0 {
return
}
newCredits, err := t.fetchCredits(operations)
if err != nil {
t.metrics.ThrottlerUpdateFailure.Inc(1)
t.logger.Error("Failed to fetch credits: " + err.Error())
return
}
t.metrics.ThrottlerUpdateSuccess.Inc(1)
t.mux.Lock()
defer t.mux.Unlock()
for _, opBalance := range newCredits.Balances {
t.credits[opBalance.Operation] += opBalance.Balance
}
}
func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
uuid := t.uuid.Load()
uuidStr, _ := uuid.(string)
if uuid == nil || uuidStr == "" {
return nil, errorUUIDNotSet
}
return t.creditManager.FetchCredits(uuidStr, t.service, operations)
}


@ -0,0 +1,32 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package throttler
// Throttler is used to rate limit operations. For example, given how debug spans
// are always sampled, a throttler can be enabled per client to rate limit the amount
// of debug spans a client can start.
type Throttler interface {
// IsAllowed determines whether the operation should be allowed and not be
// throttled.
IsAllowed(operation string) bool
}
// DefaultThrottler doesn't throttle at all.
type DefaultThrottler struct{}
// IsAllowed implements Throttler#IsAllowed.
func (t DefaultThrottler) IsAllowed(operation string) bool {
return true
}

vendor/github.com/uber/jaeger-client-go/interop.go generated vendored Normal file

@ -0,0 +1,55 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/opentracing/opentracing-go"
)
// TODO this file should not be needed after TChannel PR.
type formatKey int
// SpanContextFormat is a constant used as OpenTracing Format.
// Requires *SpanContext as carrier.
// This format is intended for interop with TChannel or other Zipkin-like tracers.
const SpanContextFormat formatKey = iota
type jaegerTraceContextPropagator struct {
tracer *Tracer
}
func (p *jaegerTraceContextPropagator) Inject(
ctx SpanContext,
abstractCarrier interface{},
) error {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return opentracing.ErrInvalidCarrier
}
carrier.CopyFrom(&ctx)
return nil
}
func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
ctx := new(SpanContext)
ctx.CopyFrom(carrier)
return *ctx, nil
}

vendor/github.com/uber/jaeger-client-go/jaeger_tag.go generated vendored Normal file

@ -0,0 +1,84 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"github.com/opentracing/opentracing-go/log"
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)
type tags []*j.Tag
// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
fields := tags(make([]*j.Tag, 0, len(logFields)))
for _, field := range logFields {
field.Marshal(&fields)
}
return fields
}
func (t *tags) EmitString(key, value string) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
}
func (t *tags) EmitBool(key string, value bool) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
}
func (t *tags) EmitInt(key string, value int) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitInt32(key string, value int32) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitInt64(key string, value int64) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
}
func (t *tags) EmitUint32(key string, value uint32) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitUint64(key string, value uint64) {
vLong := int64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
}
func (t *tags) EmitFloat32(key string, value float32) {
vDouble := float64(value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
}
func (t *tags) EmitFloat64(key string, value float64) {
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
}
func (t *tags) EmitObject(key string, value interface{}) {
vStr := fmt.Sprintf("%+v", value)
*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
}
func (t *tags) EmitLazyLogger(value log.LazyLogger) {
value(t)
}
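
`ConvertLogsToJaegerTags` is exported, so a quick sketch of its behaviour; the field names and values are illustrative, and the enum names in the trailing comment assume the usual thrift-generated `String()` output:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	"github.com/uber/jaeger-client-go"
)

func main() {
	// Each opentracing log field becomes one thrift Tag with a matching VType.
	jTags := jaeger.ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "error"),
		log.Int64("rc", -1),
		log.Bool("fatal", true),
	})
	for _, t := range jTags {
		fmt.Println(t.Key, t.VType) // event STRING, rc LONG, fatal BOOL
	}
}
```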


@ -0,0 +1,179 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
"github.com/opentracing/opentracing-go"
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/utils"
)
// BuildJaegerThrift builds jaeger span based on internal span.
func BuildJaegerThrift(span *Span) *j.Span {
span.Lock()
defer span.Unlock()
startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
duration := span.duration.Nanoseconds() / int64(time.Microsecond)
jaegerSpan := &j.Span{
TraceIdLow: int64(span.context.traceID.Low),
TraceIdHigh: int64(span.context.traceID.High),
SpanId: int64(span.context.spanID),
ParentSpanId: int64(span.context.parentID),
OperationName: span.operationName,
Flags: int32(span.context.flags),
StartTime: startTime,
Duration: duration,
Tags: buildTags(span.tags),
Logs: buildLogs(span.logs),
References: buildReferences(span.references),
}
return jaegerSpan
}
// BuildJaegerProcessThrift creates a thrift Process type.
func BuildJaegerProcessThrift(span *Span) *j.Process {
span.Lock()
defer span.Unlock()
return buildJaegerProcessThrift(span.tracer)
}
func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
process := &j.Process{
ServiceName: tracer.serviceName,
Tags: buildTags(tracer.tags),
}
if tracer.process.UUID != "" {
process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
}
return process
}
func buildTags(tags []Tag) []*j.Tag {
jTags := make([]*j.Tag, 0, len(tags))
for _, tag := range tags {
jTag := buildTag(&tag)
jTags = append(jTags, jTag)
}
return jTags
}
func buildLogs(logs []opentracing.LogRecord) []*j.Log {
jLogs := make([]*j.Log, 0, len(logs))
for _, log := range logs {
jLog := &j.Log{
Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
Fields: ConvertLogsToJaegerTags(log.Fields),
}
jLogs = append(jLogs, jLog)
}
return jLogs
}
func buildTag(tag *Tag) *j.Tag {
jTag := &j.Tag{Key: tag.key}
switch value := tag.value.(type) {
case string:
vStr := truncateString(value)
jTag.VStr = &vStr
jTag.VType = j.TagType_STRING
case []byte:
if len(value) > maxAnnotationLength {
value = value[:maxAnnotationLength]
}
jTag.VBinary = value
jTag.VType = j.TagType_BINARY
case int:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int8:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint8:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int16:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint16:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int32:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint32:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case int64:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case uint64:
vLong := int64(value)
jTag.VLong = &vLong
jTag.VType = j.TagType_LONG
case float32:
vDouble := float64(value)
jTag.VDouble = &vDouble
jTag.VType = j.TagType_DOUBLE
case float64:
vDouble := float64(value)
jTag.VDouble = &vDouble
jTag.VType = j.TagType_DOUBLE
case bool:
vBool := value
jTag.VBool = &vBool
jTag.VType = j.TagType_BOOL
default:
vStr := truncateString(stringify(value))
jTag.VStr = &vStr
jTag.VType = j.TagType_STRING
}
return jTag
}
func buildReferences(references []Reference) []*j.SpanRef {
retMe := make([]*j.SpanRef, 0, len(references))
for _, ref := range references {
if ref.Type == opentracing.ChildOfRef {
retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
} else if ref.Type == opentracing.FollowsFromRef {
retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
}
}
return retMe
}
func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
return &j.SpanRef{
RefType: refType,
TraceIdLow: int64(ctx.traceID.Low),
TraceIdHigh: int64(ctx.traceID.High),
SpanId: int64(ctx.spanID),
}
}

vendor/github.com/uber/jaeger-client-go/log/logger.go generated vendored Normal file

@ -0,0 +1,90 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"fmt"
"log"
"sync"
)
// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
// Error logs a message at error priority
Error(msg string)
// Infof logs a message at info priority
Infof(msg string, args ...interface{})
}
// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
var StdLogger = &stdLogger{}
type stdLogger struct{}
func (l *stdLogger) Error(msg string) {
log.Printf("ERROR: %s", msg)
}
// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// NullLogger is a no-op implementation of the Logger interface
var NullLogger = &nullLogger{}
type nullLogger struct{}
func (l *nullLogger) Error(msg string) {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}
// BytesBufferLogger implements Logger backed by a bytes.Buffer.
type BytesBufferLogger struct {
mux sync.Mutex
buf bytes.Buffer
}
// Error implements Logger.
func (l *BytesBufferLogger) Error(msg string) {
l.mux.Lock()
l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
l.mux.Unlock()
}
// Infof implements Logger.
func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
l.mux.Lock()
l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
l.mux.Unlock()
}
// String returns string representation of the underlying buffer.
func (l *BytesBufferLogger) String() string {
l.mux.Lock()
defer l.mux.Unlock()
return l.buf.String()
}
// Flush empties the underlying buffer.
func (l *BytesBufferLogger) Flush() {
l.mux.Lock()
defer l.mux.Unlock()
l.buf.Reset()
}
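
A short usage sketch: `BytesBufferLogger` is convenient in tests for asserting on what the tracer logged, while `StdLogger` and `NullLogger` are the usual production choices:

```go
package main

import (
	"fmt"

	jaegerlog "github.com/uber/jaeger-client-go/log"
)

func main() {
	var logger jaegerlog.BytesBufferLogger
	logger.Error("agent unreachable")
	logger.Infof("flushed %d spans", 10)

	fmt.Print(logger.String())
	// ERROR: agent unreachable
	// INFO: flushed 10 spans

	logger.Flush() // empty the buffer, e.g. between test cases
}
```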

vendor/github.com/uber/jaeger-client-go/logger.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import "log"
// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
// Logger provides an abstract interface for logging from Reporters.
// Applications can provide their own implementation of this interface to adapt
// reporters logging to whatever logging library they prefer (stdlib log,
// logrus, go-logging, etc).
type Logger interface {
// Error logs a message at error priority
Error(msg string)
// Infof logs a message at info priority
Infof(msg string, args ...interface{})
}
// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
var StdLogger = &stdLogger{}
type stdLogger struct{}
func (l *stdLogger) Error(msg string) {
log.Printf("ERROR: %s", msg)
}
// Infof logs a message at info priority
func (l *stdLogger) Infof(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// NullLogger is a no-op implementation of the Logger interface
var NullLogger = &nullLogger{}
type nullLogger struct{}
func (l *nullLogger) Error(msg string) {}
func (l *nullLogger) Infof(msg string, args ...interface{}) {}

vendor/github.com/uber/jaeger-client-go/metrics.go generated vendored Normal file

@ -0,0 +1,107 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"github.com/uber/jaeger-lib/metrics"
)
// Metrics is a container of all stats emitted by Jaeger tracer.
type Metrics struct {
// Number of traces started by this tracer as sampled
TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y"`
// Number of traces started by this tracer as not sampled
TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n"`
// Number of externally started sampled traces this tracer joined
TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y"`
// Number of externally started not-sampled traces this tracer joined
TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"`
// Number of sampled spans started by this tracer
SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"`
// Number of unsampled spans started by this tracer
SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"`
// Number of spans finished by this tracer
SpansFinished metrics.Counter `metric:"finished_spans"`
// Number of errors decoding tracing context
DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"`
// Number of spans successfully reported
ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"`
// Number of spans not reported due to a Sender failure
ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"`
// Number of spans dropped due to internal queue overflow
ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"`
// Current number of spans in the reporter queue
ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"`
// Number of times the Sampler succeeded to retrieve sampling strategy
SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"`
// Number of times the Sampler failed to retrieve sampling strategy
SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"`
// Number of times the Sampler succeeded to retrieve and update sampling strategy
SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"`
// Number of times the Sampler failed to update sampling strategy
SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"`
// Number of times baggage was successfully written or updated on spans.
BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"`
// Number of times baggage failed to write or update on spans.
BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"`
// Number of times baggage was truncated as per baggage restrictions.
BaggageTruncate metrics.Counter `metric:"baggage_truncations"`
// Number of times baggage restrictions were successfully updated.
BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"`
// Number of times baggage restrictions failed to update.
BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"`
// Number of times debug spans were throttled.
ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans"`
// Number of times throttler successfully updated.
ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok"`
// Number of times throttler failed to update.
ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err"`
}
// NewMetrics creates a new Metrics struct and initializes it.
func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
m := &Metrics{}
// TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense)
metrics.Init(m, factory.Namespace("jaeger", nil), globalTags)
return m
}
// NewNullMetrics creates a new Metrics struct that won't report any metrics.
func NewNullMetrics() *Metrics {
return NewMetrics(metrics.NullFactory, nil)
}
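
A short usage sketch for the Metrics container above, using only what this file and the jaeger-lib `metrics` package export (the `main` wrapper and the tag values are illustrative):

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// A no-op Metrics set, useful when stats emission is not needed.
	noop := jaeger.NewNullMetrics()

	// A Metrics set wired to a metrics.Factory (NullFactory here; a real
	// backend factory such as expvar or Prometheus would be used in practice).
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})
	m.TracesStartedSampled.Inc(1) // counters follow the jaeger-lib metrics.Counter API

	fmt.Println(noop != nil, m != nil)
}
```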

88
vendor/github.com/uber/jaeger-client-go/observer.go generated vendored Normal file

@ -0,0 +1,88 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import opentracing "github.com/opentracing/opentracing-go"
// Observer can be registered with the Tracer to receive notifications about
// new Spans.
//
// Deprecated: use jaeger.ContribObserver instead.
type Observer interface {
OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
}
// SpanObserver is created by the Observer and receives notifications about
// other Span events.
//
// Deprecated: use jaeger.ContribSpanObserver instead.
type SpanObserver interface {
OnSetOperationName(operationName string)
OnSetTag(key string, value interface{})
OnFinish(options opentracing.FinishOptions)
}
// compositeObserver is a dispatcher to other observers
type compositeObserver struct {
observers []ContribObserver
}
// compositeSpanObserver is a dispatcher to other span observers
type compositeSpanObserver struct {
observers []ContribSpanObserver
}
// noopSpanObserver is used when there are no observers registered
// on the Tracer or none of them returns span observers from OnStartSpan.
var noopSpanObserver = &compositeSpanObserver{}
func (o *compositeObserver) append(contribObserver ContribObserver) {
o.observers = append(o.observers, contribObserver)
}
func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
var spanObservers []ContribSpanObserver
for _, obs := range o.observers {
spanObs, ok := obs.OnStartSpan(sp, operationName, options)
if ok {
if spanObservers == nil {
spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
}
spanObservers = append(spanObservers, spanObs)
}
}
if len(spanObservers) == 0 {
return noopSpanObserver
}
return &compositeSpanObserver{observers: spanObservers}
}
func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
for _, obs := range o.observers {
obs.OnSetOperationName(operationName)
}
}
func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
for _, obs := range o.observers {
obs.OnSetTag(key, value)
}
}
func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
for _, obs := range o.observers {
obs.OnFinish(options)
}
}
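
A minimal sketch of the (deprecated) Observer/SpanObserver interfaces defined above; how such an observer is registered with the tracer is a tracer option outside this hunk, so only the interface implementation is shown:

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// spanLogger is a toy observer that logs every started span.
type spanLogger struct{}

func (spanLogger) OnStartSpan(operationName string, options opentracing.StartSpanOptions) jaeger.SpanObserver {
	fmt.Println("started span:", operationName)
	return noopSpanObserver{}
}

// noopSpanObserver ignores all span events.
type noopSpanObserver struct{}

func (noopSpanObserver) OnSetOperationName(operationName string)    {}
func (noopSpanObserver) OnSetTag(key string, value interface{})     {}
func (noopSpanObserver) OnFinish(options opentracing.FinishOptions) {}

func main() {
	// Compile-time check against the deprecated Observer interface above.
	var _ jaeger.Observer = spanLogger{}
}
```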

29
vendor/github.com/uber/jaeger-client-go/process.go generated vendored Normal file

@ -0,0 +1,29 @@
// Copyright (c) 2018 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
// Process holds process-specific metadata that's relevant to this client.
type Process struct {
Service string
UUID string
Tags []Tag
}
// ProcessSetter sets a process. This can be used by any class that requires
// the process to be set as part of initialization.
// See internal/throttler/remote/throttler.go for an example.
type ProcessSetter interface {
SetProcess(process Process)
}

300
vendor/github.com/uber/jaeger-client-go/propagation.go generated vendored Normal file

@ -0,0 +1,300 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"log"
"net/url"
"strings"
"sync"
opentracing "github.com/opentracing/opentracing-go"
)
// Injector is responsible for injecting SpanContext instances in a manner suitable
// for propagation via a format-specific "carrier" object. Typically the
// injection will take place across an RPC boundary, but message queues and
// other IPC mechanisms are also reasonable places to use an Injector.
type Injector interface {
// Inject takes `SpanContext` and injects it into `carrier`. The actual type
// of `carrier` depends on the `format` passed to `Tracer.Inject()`.
//
// Implementations may return opentracing.ErrInvalidCarrier or any other
// implementation-specific error if injection fails.
Inject(ctx SpanContext, carrier interface{}) error
}
// Extractor is responsible for extracting SpanContext instances from a
// format-specific "carrier" object. Typically the extraction will take place
// on the server side of an RPC boundary, but message queues and other IPC
// mechanisms are also reasonable places to use an Extractor.
type Extractor interface {
// Extract decodes a SpanContext instance from the given `carrier`,
// or (nil, opentracing.ErrSpanContextNotFound) if no context could
// be found in the `carrier`.
Extract(carrier interface{}) (SpanContext, error)
}
type textMapPropagator struct {
headerKeys *HeadersConfig
metrics Metrics
encodeValue func(string) string
decodeValue func(string) string
}
func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
return &textMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
return val
},
decodeValue: func(val string) string {
return val
},
}
}
func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator {
return &textMapPropagator{
headerKeys: headerKeys,
metrics: metrics,
encodeValue: func(val string) string {
return url.QueryEscape(val)
},
decodeValue: func(val string) string {
// ignore decoding errors, cannot do anything about them
if v, err := url.QueryUnescape(val); err == nil {
return v
}
return val
},
}
}
type binaryPropagator struct {
tracer *Tracer
buffers sync.Pool
}
func newBinaryPropagator(tracer *Tracer) *binaryPropagator {
return &binaryPropagator{
tracer: tracer,
buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
}
}
func (p *textMapPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
if !ok {
return opentracing.ErrInvalidCarrier
}
// Do not encode the string with trace context to avoid accidental double-encoding
// if people are using opentracing < 0.10.0. Our colon-separated representation
// of the trace context is already safe for HTTP headers.
textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
for k, v := range sc.baggage {
safeKey := p.addBaggageKeyPrefix(k)
safeVal := p.encodeValue(v)
textMapWriter.Set(safeKey, safeVal)
}
return nil
}
func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
var baggage map[string]string
err := textMapReader.ForeachKey(func(rawKey, value string) error {
key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
if key == p.headerKeys.TraceContextHeaderName {
var err error
safeVal := p.decodeValue(value)
if ctx, err = ContextFromString(safeVal); err != nil {
return err
}
} else if key == p.headerKeys.JaegerDebugHeader {
ctx.debugID = p.decodeValue(value)
} else if key == p.headerKeys.JaegerBaggageHeader {
if baggage == nil {
baggage = make(map[string]string)
}
for k, v := range p.parseCommaSeparatedMap(value) {
baggage[k] = v
}
} else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
if baggage == nil {
baggage = make(map[string]string)
}
safeKey := p.removeBaggageKeyPrefix(key)
safeVal := p.decodeValue(value)
baggage[safeKey] = safeVal
}
return nil
})
if err != nil {
p.metrics.DecodingErrors.Inc(1)
return emptyContext, err
}
if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
return emptyContext, opentracing.ErrSpanContextNotFound
}
ctx.baggage = baggage
return ctx, nil
}
func (p *binaryPropagator) Inject(
sc SpanContext,
abstractCarrier interface{},
) error {
carrier, ok := abstractCarrier.(io.Writer)
if !ok {
return opentracing.ErrInvalidCarrier
}
// Handle the tracer context
if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
return err
}
if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
return err
}
// Handle the baggage items
if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
return err
}
for k, v := range sc.baggage {
if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
return err
}
io.WriteString(carrier, k)
if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
return err
}
io.WriteString(carrier, v)
}
return nil
}
func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(io.Reader)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
// Handle the baggage items
var numBaggage int32
if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
ctx.baggage = make(map[string]string, iNumBaggage)
buf := p.buffers.Get().(*bytes.Buffer)
defer p.buffers.Put(buf)
var keyLen, valLen int32
for i := 0; i < iNumBaggage; i++ {
if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
buf.Reset()
buf.Grow(int(keyLen))
if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
key := buf.String()
if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
buf.Reset()
buf.Grow(int(valLen))
if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
ctx.baggage[key] = buf.String()
}
}
return ctx, nil
}
// Converts a comma separated key value pair list into a map
// e.g. key1=value1, key2=value2, key3 = value3
// is converted to map[string]string { "key1" : "value1",
// "key2" : "value2",
// "key3" : "value3" }
func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
baggage := make(map[string]string)
value, err := url.QueryUnescape(value)
if err != nil {
log.Printf("Unable to unescape %s, %v", value, err)
return baggage
}
for _, kvpair := range strings.Split(value, ",") {
kv := strings.Split(strings.TrimSpace(kvpair), "=")
if len(kv) == 2 {
baggage[kv[0]] = kv[1]
} else {
log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
}
}
return baggage
}
// Converts a baggage item key into an http header format,
// by prepending TraceBaggageHeaderPrefix and encoding the key string
func (p *textMapPropagator) addBaggageKeyPrefix(key string) string {
// TODO encodeBaggageKeyAsHeader add caching and escaping
return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
}
func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string {
// TODO decodeBaggageHeaderKey add caching and escaping
return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
}
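
A sketch of how the Injector/Extractor machinery above is normally exercised: indirectly, through the standard opentracing `Inject`/`Extract` calls with an HTTP-header carrier. The tracer here is the opentracing no-op global tracer purely so the snippet runs; in practice it would be a jaeger tracer constructed elsewhere (tracer construction is not part of this hunk):

```go
package main

import (
	"log"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

// propagate shows both halves of context propagation over HTTP headers.
func propagate(tracer opentracing.Tracer, span opentracing.Span, req *http.Request) {
	// Client side: inject the span context into the outgoing HTTP headers.
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		log.Printf("inject failed: %v", err)
	}

	// Server side: extract the span context from the incoming HTTP headers.
	if sc, err := tracer.Extract(opentracing.HTTPHeaders, carrier); err == nil {
		_ = sc // usually passed to StartSpan via opentracing.ChildOf(sc)
	}
}

func main() {
	tracer := opentracing.GlobalTracer() // no-op tracer; a jaeger tracer would be used in practice
	span := tracer.StartSpan("example")
	req, _ := http.NewRequest("GET", "http://localhost/", nil)
	propagate(tracer, span, req)
	span.Finish()
}
```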

23
vendor/github.com/uber/jaeger-client-go/reference.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import "github.com/opentracing/opentracing-go"
// Reference represents a causal reference to other Spans (via their SpanContext).
type Reference struct {
Type opentracing.SpanReferenceType
Context SpanContext
}

289
vendor/github.com/uber/jaeger-client-go/reporter.go generated vendored Normal file

@ -0,0 +1,289 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/log"
)
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
type Reporter interface {
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
Report(span *Span)
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
Close()
}
// ------------------------------
type nullReporter struct{}
// NewNullReporter creates a no-op reporter that ignores all reported spans.
func NewNullReporter() Reporter {
return &nullReporter{}
}
// Report implements Report() method of Reporter by doing nothing.
func (r *nullReporter) Report(span *Span) {
// no-op
}
// Close implements Close() method of Reporter by doing nothing.
func (r *nullReporter) Close() {
// no-op
}
// ------------------------------
type loggingReporter struct {
logger Logger
}
// NewLoggingReporter creates a reporter that logs all reported spans to the provided logger.
func NewLoggingReporter(logger Logger) Reporter {
return &loggingReporter{logger}
}
// Report implements Report() method of Reporter by logging the span to the logger.
func (r *loggingReporter) Report(span *Span) {
r.logger.Infof("Reporting span %+v", span)
}
// Close implements Close() method of Reporter by doing nothing.
func (r *loggingReporter) Close() {
// no-op
}
// ------------------------------
// InMemoryReporter is used for testing, and simply collects spans in memory.
type InMemoryReporter struct {
spans []opentracing.Span
lock sync.Mutex
}
// NewInMemoryReporter creates a reporter that stores spans in memory.
// NOTE: the Tracer should be created with options.PoolSpans = false.
func NewInMemoryReporter() *InMemoryReporter {
return &InMemoryReporter{
spans: make([]opentracing.Span, 0, 10),
}
}
// Report implements Report() method of Reporter by storing the span in the buffer.
func (r *InMemoryReporter) Report(span *Span) {
r.lock.Lock()
r.spans = append(r.spans, span)
r.lock.Unlock()
}
// Close implements Close() method of Reporter by doing nothing.
func (r *InMemoryReporter) Close() {
// no-op
}
// SpansSubmitted returns the number of spans accumulated in the buffer.
func (r *InMemoryReporter) SpansSubmitted() int {
r.lock.Lock()
defer r.lock.Unlock()
return len(r.spans)
}
// GetSpans returns accumulated spans as a copy of the buffer.
func (r *InMemoryReporter) GetSpans() []opentracing.Span {
r.lock.Lock()
defer r.lock.Unlock()
copied := make([]opentracing.Span, len(r.spans))
copy(copied, r.spans)
return copied
}
// Reset clears all accumulated spans.
func (r *InMemoryReporter) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
r.spans = nil
}
// ------------------------------
type compositeReporter struct {
reporters []Reporter
}
// NewCompositeReporter creates a reporter that delegates all Report and Close calls to each of the given reporters.
func NewCompositeReporter(reporters ...Reporter) Reporter {
return &compositeReporter{reporters: reporters}
}
// Report implements Report() method of Reporter by delegating to each underlying reporter.
func (r *compositeReporter) Report(span *Span) {
for _, reporter := range r.reporters {
reporter.Report(span)
}
}
// Close implements Close() method of Reporter by closing each underlying reporter.
func (r *compositeReporter) Close() {
for _, reporter := range r.reporters {
reporter.Close()
}
}
// ------------- REMOTE REPORTER -----------------
type reporterQueueItemType int
const (
defaultQueueSize = 100
defaultBufferFlushInterval = 1 * time.Second
reporterQueueItemSpan reporterQueueItemType = iota
reporterQueueItemClose
)
type reporterQueueItem struct {
itemType reporterQueueItemType
span *Span
close *sync.WaitGroup
}
type remoteReporter struct {
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
queueLength int64
closed int64 // 0 - not closed, 1 - closed
reporterOptions
sender Transport
queue chan reporterQueueItem
}
// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
// Calls to Close() block until all spans reported prior to the call to Close are flushed.
func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
options := reporterOptions{}
for _, option := range opts {
option(&options)
}
if options.bufferFlushInterval <= 0 {
options.bufferFlushInterval = defaultBufferFlushInterval
}
if options.logger == nil {
options.logger = log.NullLogger
}
if options.metrics == nil {
options.metrics = NewNullMetrics()
}
if options.queueSize <= 0 {
options.queueSize = defaultQueueSize
}
reporter := &remoteReporter{
reporterOptions: options,
sender: sender,
queue: make(chan reporterQueueItem, options.queueSize),
}
go reporter.processQueue()
return reporter
}
// Report implements Report() method of Reporter.
// It passes the span to a background go-routine for submission to Jaeger backend.
// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
// because some of them may still be successfully added to the queue.
func (r *remoteReporter) Report(span *Span) {
select {
case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}:
atomic.AddInt64(&r.queueLength, 1)
default:
r.metrics.ReporterDropped.Inc(1)
}
}
// Close implements Close() method of Reporter by waiting for the queue to be drained.
func (r *remoteReporter) Close() {
if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
r.logger.Error("Repeated attempt to close the reporter is ignored")
return
}
r.sendCloseEvent()
r.sender.Close()
}
func (r *remoteReporter) sendCloseEvent() {
wg := &sync.WaitGroup{}
wg.Add(1)
item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
r.queue <- item // if the queue is full we will block until there is space
atomic.AddInt64(&r.queueLength, 1)
wg.Wait()
}
// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger.
// The buffer also gets flushed automatically every bufferFlushInterval, just in case the tracer stopped
// reporting new spans.
func (r *remoteReporter) processQueue() {
// flush causes the Sender to flush its accumulated spans and clear the buffer
flush := func() {
if flushed, err := r.sender.Flush(); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
}
}
timer := time.NewTicker(r.bufferFlushInterval)
for {
select {
case <-timer.C:
flush()
case item := <-r.queue:
atomic.AddInt64(&r.queueLength, -1)
switch item.itemType {
case reporterQueueItemSpan:
span := item.span
if flushed, err := r.sender.Append(span); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
// to reduce the number of gauge stats, we only emit queue length on flush
r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
}
case reporterQueueItemClose:
timer.Stop()
flush()
item.close.Done()
return
}
}
}
}
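
A test-oriented sketch wiring the InMemoryReporter above into a tracer. `jaeger.NewTracer` belongs to this package but is not part of this hunk, and the service name is made up; `NewConstSampler` appears later in this diff (sampler.go):

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// InMemoryReporter collects finished spans so a test can inspect them.
	reporter := jaeger.NewInMemoryReporter()

	// NewTracer is assumed here: part of this package, but outside this hunk.
	tracer, closer := jaeger.NewTracer(
		"kata-runtime-test",
		jaeger.NewConstSampler(true), // sample everything (see sampler.go below)
		reporter,
	)
	defer closer.Close()

	tracer.StartSpan("create").Finish()
	fmt.Println("spans captured:", reporter.SpansSubmitted()) // 1
}
```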


@ -0,0 +1,69 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
)
// ReporterOption is a function that sets some option on the reporter.
type ReporterOption func(c *reporterOptions)
// ReporterOptions is a factory for all available ReporterOption values.
var ReporterOptions reporterOptions
// reporterOptions control behavior of the reporter.
type reporterOptions struct {
// queueSize is the size of internal queue where reported spans are stored before they are processed in the background
queueSize int
// bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
bufferFlushInterval time.Duration
// logger is used to log errors of span submissions
logger Logger
// metrics is used to record runtime stats
metrics *Metrics
}
// QueueSize creates a ReporterOption that sets the size of the internal queue where
// spans are stored before they are processed.
func (reporterOptions) QueueSize(queueSize int) ReporterOption {
return func(r *reporterOptions) {
r.queueSize = queueSize
}
}
// Metrics creates a ReporterOption that initializes Metrics in the reporter,
// which is used to record runtime statistics.
func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
return func(r *reporterOptions) {
r.metrics = metrics
}
}
// BufferFlushInterval creates a ReporterOption that sets how often the queue
// is force-flushed.
func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
return func(r *reporterOptions) {
r.bufferFlushInterval = bufferFlushInterval
}
}
// Logger creates a ReporterOption that initializes the logger used to log
// errors of span submissions.
func (reporterOptions) Logger(logger Logger) ReporterOption {
return func(r *reporterOptions) {
r.logger = logger
}
}
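
A sketch of building a remote reporter with these options. The UDP transport constructor and the agent address are assumptions (transport code is not in this hunk):

```go
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// NewUDPTransport is assumed (not in this hunk); it sends spans to a local
	// jaeger-agent. A max packet size of 0 means "use the default".
	sender, err := jaeger.NewUDPTransport("127.0.0.1:6831", 0)
	if err != nil {
		panic(err)
	}

	reporter := jaeger.NewRemoteReporter(
		sender,
		jaeger.ReporterOptions.QueueSize(200),
		jaeger.ReporterOptions.BufferFlushInterval(500*time.Millisecond),
		jaeger.ReporterOptions.Logger(jaeger.StdLogger),
	)
	defer reporter.Close() // blocks until queued spans are flushed

	// Optionally log every span as well as sending it to the agent.
	_ = jaeger.NewCompositeReporter(reporter, jaeger.NewLoggingReporter(jaeger.StdLogger))
}
```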


@ -0,0 +1,16 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
package rpcmetrics


@ -0,0 +1,63 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import "sync"
// normalizedEndpoints is a cache for endpointName -> safeName mappings.
type normalizedEndpoints struct {
names map[string]string
maxSize int
defaultName string
normalizer NameNormalizer
mux sync.RWMutex
}
func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
return &normalizedEndpoints{
maxSize: maxSize,
normalizer: normalizer,
names: make(map[string]string, maxSize),
}
}
// normalize looks up the name in the cache; if not found, it uses the normalizer
// to convert the name to a safe name. If called with more than maxSize unique
// names, it returns "" for all names beyond those already cached.
func (n *normalizedEndpoints) normalize(name string) string {
n.mux.RLock()
norm, ok := n.names[name]
l := len(n.names)
n.mux.RUnlock()
if ok {
return norm
}
if l >= n.maxSize {
return ""
}
return n.normalizeWithLock(name)
}
func (n *normalizedEndpoints) normalizeWithLock(name string) string {
norm := n.normalizer.Normalize(name)
n.mux.Lock()
defer n.mux.Unlock()
// cache may have grown while we were not holding the lock
if len(n.names) >= n.maxSize {
return ""
}
n.names[name] = norm
return norm
}


@ -0,0 +1,124 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import (
"sync"
"github.com/uber/jaeger-lib/metrics"
)
const (
otherEndpointsPlaceholder = "other"
endpointNameMetricTag = "endpoint"
)
// Metrics is a collection of metrics for an endpoint describing
// throughput, success, errors, and performance.
type Metrics struct {
// RequestCountSuccess is a counter of the total number of successes.
RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
// RequestCountFailures is a counter of the number of times any failure has been observed.
RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
// RequestLatencySuccess is a latency histogram of successful requests.
RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
// RequestLatencyFailures is a latency histogram of failed requests.
RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
// HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
// HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
// HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
// HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
}
func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
if statusCode >= 200 && statusCode < 300 {
m.HTTPStatusCode2xx.Inc(1)
} else if statusCode >= 300 && statusCode < 400 {
m.HTTPStatusCode3xx.Inc(1)
} else if statusCode >= 400 && statusCode < 500 {
m.HTTPStatusCode4xx.Inc(1)
} else if statusCode >= 500 && statusCode < 600 {
m.HTTPStatusCode5xx.Inc(1)
}
}
// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
// to a generic endpoint name "other".
type MetricsByEndpoint struct {
metricsFactory metrics.Factory
endpoints *normalizedEndpoints
metricsByEndpoint map[string]*Metrics
mux sync.RWMutex
}
func newMetricsByEndpoint(
metricsFactory metrics.Factory,
normalizer NameNormalizer,
maxNumberOfEndpoints int,
) *MetricsByEndpoint {
return &MetricsByEndpoint{
metricsFactory: metricsFactory,
endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
}
}
func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
safeName := m.endpoints.normalize(endpoint)
if safeName == "" {
safeName = otherEndpointsPlaceholder
}
m.mux.RLock()
met := m.metricsByEndpoint[safeName]
m.mux.RUnlock()
if met != nil {
return met
}
return m.getWithWriteLock(safeName)
}
// split out to make it easier to test
func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
m.mux.Lock()
defer m.mux.Unlock()
// it is possible that the name has been already registered after we released
// the read lock and before we grabbed the write lock, so check for that.
if met, ok := m.metricsByEndpoint[safeName]; ok {
return met
}
// it would be nice to create the struct before locking, since Init() is somewhat
// expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
met := &Metrics{}
tags := map[string]string{endpointNameMetricTag: safeName}
metrics.Init(met, m.metricsFactory, tags)
m.metricsByEndpoint[safeName] = met
return met
}


@ -0,0 +1,101 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
// NameNormalizer is used to convert the endpoint names to strings
// that can be safely used as tags in the metrics.
type NameNormalizer interface {
Normalize(name string) string
}
// DefaultNameNormalizer converts endpoint names so that they contain only characters
// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
var DefaultNameNormalizer = &SimpleNameNormalizer{
SafeSets: []SafeCharacterSet{
&Range{From: 'a', To: 'z'},
&Range{From: 'A', To: 'Z'},
&Range{From: '0', To: '9'},
&Char{'-'},
&Char{'_'},
&Char{'/'},
&Char{'.'},
},
Replacement: '-',
}
// SimpleNameNormalizer uses a set of safe character sets.
type SimpleNameNormalizer struct {
SafeSets []SafeCharacterSet
Replacement byte
}
// SafeCharacterSet determines if the given character is "safe"
type SafeCharacterSet interface {
IsSafe(c byte) bool
}
// Range implements SafeCharacterSet
type Range struct {
From, To byte
}
// IsSafe implements SafeCharacterSet
func (r *Range) IsSafe(c byte) bool {
return c >= r.From && c <= r.To
}
// Char implements SafeCharacterSet
type Char struct {
Val byte
}
// IsSafe implements SafeCharacterSet
func (ch *Char) IsSafe(c byte) bool {
return c == ch.Val
}
// Normalize checks each character in the string against SafeSets,
// and if it's not safe substitutes it with Replacement.
func (n *SimpleNameNormalizer) Normalize(name string) string {
var retMe []byte
nameBytes := []byte(name)
for i, b := range nameBytes {
if n.safeByte(b) {
if retMe != nil {
retMe[i] = b
}
} else {
if retMe == nil {
retMe = make([]byte, len(nameBytes))
copy(retMe[0:i], nameBytes[0:i])
}
retMe[i] = n.Replacement
}
}
if retMe == nil {
return name
}
return string(retMe)
}
// safeByte checks b against all safe charsets.
func (n *SimpleNameNormalizer) safeByte(b byte) bool {
for i := range n.SafeSets {
if n.SafeSets[i].IsSafe(b) {
return true
}
}
return false
}
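
A worked example of the default normalizer above (runnable as-is):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/rpcmetrics"
)

func main() {
	// Characters outside [a-zA-Z0-9-_/.] are replaced with '-':
	fmt.Println(rpcmetrics.DefaultNameNormalizer.Normalize("GET /user/{id}"))
	// Output: GET-/user/-id-
}
```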


@ -0,0 +1,171 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcmetrics
import (
"strconv"
"sync"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/uber/jaeger-lib/metrics"
jaeger "github.com/uber/jaeger-client-go"
)
const defaultMaxNumberOfEndpoints = 200
// Observer is an observer that can emit RPC metrics.
type Observer struct {
metricsByEndpoint *MetricsByEndpoint
}
// NewObserver creates a new observer that can emit RPC metrics.
func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
return &Observer{
metricsByEndpoint: newMetricsByEndpoint(
metricsFactory,
normalizer,
defaultMaxNumberOfEndpoints,
),
}
}
// OnStartSpan creates a new SpanObserver for the span.
func (o *Observer) OnStartSpan(
operationName string,
options opentracing.StartSpanOptions,
) jaeger.SpanObserver {
return NewSpanObserver(o.metricsByEndpoint, operationName, options)
}
// SpanKind identifies the span as inbound, outbound, or internal
type SpanKind int
const (
// Local span kind
Local SpanKind = iota
// Inbound span kind
Inbound
// Outbound span kind
Outbound
)
// SpanObserver collects RPC metrics
type SpanObserver struct {
metricsByEndpoint *MetricsByEndpoint
operationName string
startTime time.Time
mux sync.Mutex
kind SpanKind
httpStatusCode uint16
err bool
}
// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
func NewSpanObserver(
metricsByEndpoint *MetricsByEndpoint,
operationName string,
options opentracing.StartSpanOptions,
) *SpanObserver {
so := &SpanObserver{
metricsByEndpoint: metricsByEndpoint,
operationName: operationName,
startTime: options.StartTime,
}
for k, v := range options.Tags {
so.handleTagInLock(k, v)
}
return so
}
// handleTagInLock watches for special tags:
// - SpanKind
// - HttpStatusCode
// - Error
func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
if key == string(ext.SpanKind) {
if v, ok := value.(ext.SpanKindEnum); ok {
value = string(v)
}
if v, ok := value.(string); ok {
if v == string(ext.SpanKindRPCClientEnum) {
so.kind = Outbound
} else if v == string(ext.SpanKindRPCServerEnum) {
so.kind = Inbound
}
}
return
}
if key == string(ext.HTTPStatusCode) {
if v, ok := value.(uint16); ok {
so.httpStatusCode = v
} else if v, ok := value.(int); ok {
so.httpStatusCode = uint16(v)
} else if v, ok := value.(string); ok {
if vv, err := strconv.Atoi(v); err == nil {
so.httpStatusCode = uint16(vv)
}
}
return
}
if key == string(ext.Error) {
if v, ok := value.(bool); ok {
so.err = v
} else if v, ok := value.(string); ok {
if vv, err := strconv.ParseBool(v); err == nil {
so.err = vv
}
}
return
}
}
// OnFinish emits the RPC metrics. It only has an effect when operation name
// is not blank, and the span kind is an RPC server.
func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
so.mux.Lock()
defer so.mux.Unlock()
if so.operationName == "" || so.kind != Inbound {
return
}
mets := so.metricsByEndpoint.get(so.operationName)
latency := options.FinishTime.Sub(so.startTime)
if so.err {
mets.RequestCountFailures.Inc(1)
mets.RequestLatencyFailures.Record(latency)
} else {
mets.RequestCountSuccess.Inc(1)
mets.RequestLatencySuccess.Record(latency)
}
mets.recordHTTPStatusCode(so.httpStatusCode)
}
// OnSetOperationName records new operation name.
func (so *SpanObserver) OnSetOperationName(operationName string) {
so.mux.Lock()
so.operationName = operationName
so.mux.Unlock()
}
// OnSetTag implements SpanObserver
func (so *SpanObserver) OnSetTag(key string, value interface{}) {
so.mux.Lock()
so.handleTagInLock(key, value)
so.mux.Unlock()
}
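
A sketch of constructing the rpcmetrics Observer above. `metrics.NullFactory` stands in for a real backend factory, and registering the observer is assumed to go through a tracer option such as `jaeger.TracerOptions.Observer` (like `jaeger.NewTracer`, that option is not part of this hunk):

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/rpcmetrics"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// Build an Observer that emits per-endpoint RPC metrics. NullFactory is a
	// stand-in; a real metrics backend factory would be used in practice.
	obs := rpcmetrics.NewObserver(metrics.NullFactory, rpcmetrics.DefaultNameNormalizer)

	// Registration is assumed to happen via a tracer option (outside this hunk).
	_, closer := jaeger.NewTracer(
		"kata-runtime-test",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.Observer(obs),
	)
	defer closer.Close()
}
```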

556
vendor/github.com/uber/jaeger-client-go/sampler.go generated vendored Normal file

@ -0,0 +1,556 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"fmt"
"math"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
"github.com/uber/jaeger-client-go/utils"
)
const (
defaultSamplingServerURL = "http://localhost:5778/sampling"
defaultSamplingRefreshInterval = time.Minute
defaultMaxOperations = 2000
)
// Sampler decides whether a new trace should be sampled or not.
type Sampler interface {
// IsSampled decides whether a trace with given `id` and `operation`
// should be sampled. This function will also return the tags that
// can be used to identify the type of sampling that was applied to
// the root span. Most simple samplers would return two tags,
// sampler.type and sampler.param, similar to those used in the Configuration
IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
// Close does a clean shutdown of the sampler, stopping any background
// go-routines it may have started.
Close()
// Equal checks if the `other` sampler is functionally equivalent
// to this sampler.
// TODO remove this function. This function is used to determine if 2 samplers are equivalent
// which does not bode well with the adaptive sampler which has to create all the composite samplers
// for the comparison to occur. This is expensive to do if only one sampler has changed.
Equal(other Sampler) bool
}
// -----------------------
// ConstSampler is a sampler that always makes the same decision.
type ConstSampler struct {
Decision bool
tags []Tag
}
// NewConstSampler creates a ConstSampler.
func NewConstSampler(sample bool) Sampler {
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeConst},
{key: SamplerParamTagKey, value: sample},
}
return &ConstSampler{Decision: sample, tags: tags}
}
// IsSampled implements IsSampled() of Sampler.
func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.Decision, s.tags
}
// Close implements Close() of Sampler.
func (s *ConstSampler) Close() {
// nothing to do
}
// Equal implements Equal() of Sampler.
func (s *ConstSampler) Equal(other Sampler) bool {
if o, ok := other.(*ConstSampler); ok {
return s.Decision == o.Decision
}
return false
}
// -----------------------
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
// of traces.
type ProbabilisticSampler struct {
samplingRate float64
samplingBoundary uint64
tags []Tag
}
const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
// samplingRate, in the range between 0.0 and 1.0.
//
// It relies on the fact that new trace IDs are 63-bit random numbers themselves, thus making the sampling decision
// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
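//
// For example, with samplingRate = 0.25 the samplingBoundary is roughly 2^61,
// so approximately one in four 63-bit trace IDs falls below it and is sampled.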
// TODO remove the error from this function for next major release
func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
if samplingRate < 0.0 || samplingRate > 1.0 {
return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
}
return newProbabilisticSampler(samplingRate), nil
}
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
{key: SamplerParamTagKey, value: samplingRate},
}
return &ProbabilisticSampler{
samplingRate: samplingRate,
samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
tags: tags,
}
}
// SamplingRate returns the sampling probability this sampler was constructed with.
func (s *ProbabilisticSampler) SamplingRate() float64 {
return s.samplingRate
}
// IsSampled implements IsSampled() of Sampler.
func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.samplingBoundary >= id.Low, s.tags
}
// Close implements Close() of Sampler.
func (s *ProbabilisticSampler) Close() {
// nothing to do
}
// Equal implements Equal() of Sampler.
func (s *ProbabilisticSampler) Equal(other Sampler) bool {
if o, ok := other.(*ProbabilisticSampler); ok {
return s.samplingBoundary == o.samplingBoundary
}
return false
}
// -----------------------
type rateLimitingSampler struct {
maxTracesPerSecond float64
rateLimiter utils.RateLimiter
tags []Tag
}
// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
// sequential requests can be sampled each second.
func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
{key: SamplerParamTagKey, value: maxTracesPerSecond},
}
return &rateLimitingSampler{
maxTracesPerSecond: maxTracesPerSecond,
rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
tags: tags,
}
}
// IsSampled implements IsSampled() of Sampler.
func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.rateLimiter.CheckCredit(1.0), s.tags
}
func (s *rateLimitingSampler) Close() {
// nothing to do
}
func (s *rateLimitingSampler) Equal(other Sampler) bool {
if o, ok := other.(*rateLimitingSampler); ok {
return s.maxTracesPerSecond == o.maxTracesPerSecond
}
return false
}
// -----------------------
// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
//
// The probabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
// samplers return true, the tags for probabilisticSampler will be used.
type GuaranteedThroughputProbabilisticSampler struct {
probabilisticSampler *ProbabilisticSampler
lowerBoundSampler Sampler
tags []Tag
samplingRate float64
lowerBound float64
}
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
// probabilisticSampler and rateLimitingSampler.
func NewGuaranteedThroughputProbabilisticSampler(
lowerBound, samplingRate float64,
) (*GuaranteedThroughputProbabilisticSampler, error) {
return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
}
func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
s := &GuaranteedThroughputProbabilisticSampler{
lowerBoundSampler: NewRateLimitingSampler(lowerBound),
lowerBound: lowerBound,
}
s.setProbabilisticSampler(samplingRate)
return s
}
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
s.samplingRate = s.probabilisticSampler.SamplingRate()
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
{key: SamplerParamTagKey, value: s.samplingRate},
}
}
}
// IsSampled implements IsSampled() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
s.lowerBoundSampler.IsSampled(id, operation)
return true, tags
}
sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
return sampled, s.tags
}
// Close implements Close() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Close() {
s.probabilisticSampler.Close()
s.lowerBoundSampler.Close()
}
// Equal implements Equal() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
// more information.
return false
}
// this function should only be called while holding a Write lock
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
s.setProbabilisticSampler(samplingRate)
if s.lowerBound != lowerBound {
s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
s.lowerBound = lowerBound
}
}
// -----------------------
type adaptiveSampler struct {
sync.RWMutex
samplers map[string]*GuaranteedThroughputProbabilisticSampler
defaultSampler *ProbabilisticSampler
lowerBound float64
maxOperations int
}
// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
return newAdaptiveSampler(strategies, maxOperations), nil
}
func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
for _, strategy := range strategies.PerOperationStrategies {
sampler := newGuaranteedThroughputProbabilisticSampler(
strategies.DefaultLowerBoundTracesPerSecond,
strategy.ProbabilisticSampling.SamplingRate,
)
samplers[strategy.Operation] = sampler
}
return &adaptiveSampler{
samplers: samplers,
defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
maxOperations: maxOperations,
}
}
func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
s.RLock()
sampler, ok := s.samplers[operation]
if ok {
defer s.RUnlock()
return sampler.IsSampled(id, operation)
}
s.RUnlock()
s.Lock()
defer s.Unlock()
// Check if sampler has already been created
sampler, ok = s.samplers[operation]
if ok {
return sampler.IsSampled(id, operation)
}
// Store only up to maxOperations of unique ops.
if len(s.samplers) >= s.maxOperations {
return s.defaultSampler.IsSampled(id, operation)
}
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
s.samplers[operation] = newSampler
return newSampler.IsSampled(id, operation)
}
func (s *adaptiveSampler) Close() {
s.Lock()
defer s.Unlock()
for _, sampler := range s.samplers {
sampler.Close()
}
s.defaultSampler.Close()
}
func (s *adaptiveSampler) Equal(other Sampler) bool {
// NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
// samplers which all need to be initialized before this function can be called for a comparison.
// Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
// changing. Hence this function always returns false so that the update function can be called.
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
return false
}
func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
s.Lock()
defer s.Unlock()
for _, strategy := range strategies.PerOperationStrategies {
operation := strategy.Operation
samplingRate := strategy.ProbabilisticSampling.SamplingRate
lowerBound := strategies.DefaultLowerBoundTracesPerSecond
if sampler, ok := s.samplers[operation]; ok {
sampler.update(lowerBound, samplingRate)
} else {
sampler := newGuaranteedThroughputProbabilisticSampler(
lowerBound,
samplingRate,
)
s.samplers[operation] = sampler
}
}
s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
}
}
// -----------------------
// RemotelyControlledSampler is a delegating sampler that polls a remote server
// for the appropriate sampling strategy, constructs a corresponding sampler and
// delegates to it for sampling decisions.
type RemotelyControlledSampler struct {
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
closed int64 // 0 - not closed, 1 - closed
sync.RWMutex
samplerOptions
serviceName string
manager sampling.SamplingManager
doneChan chan *sync.WaitGroup
}
type httpSamplingManager struct {
serverURL string
}
func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
var out sampling.SamplingStrategyResponse
v := url.Values{}
v.Set("service", serviceName)
if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
return nil, err
}
return &out, nil
}
// NewRemotelyControlledSampler creates a sampler that periodically pulls
// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
func NewRemotelyControlledSampler(
serviceName string,
opts ...SamplerOption,
) *RemotelyControlledSampler {
options := applySamplerOptions(opts...)
sampler := &RemotelyControlledSampler{
samplerOptions: options,
serviceName: serviceName,
manager: &httpSamplingManager{serverURL: options.samplingServerURL},
doneChan: make(chan *sync.WaitGroup),
}
go sampler.pollController()
return sampler
}
func applySamplerOptions(opts ...SamplerOption) samplerOptions {
options := samplerOptions{}
for _, option := range opts {
option(&options)
}
if options.sampler == nil {
options.sampler = newProbabilisticSampler(0.001)
}
if options.logger == nil {
options.logger = log.NullLogger
}
if options.maxOperations <= 0 {
options.maxOperations = defaultMaxOperations
}
if options.samplingServerURL == "" {
options.samplingServerURL = defaultSamplingServerURL
}
if options.metrics == nil {
options.metrics = NewNullMetrics()
}
if options.samplingRefreshInterval <= 0 {
options.samplingRefreshInterval = defaultSamplingRefreshInterval
}
return options
}
// IsSampled implements IsSampled() of Sampler.
func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
s.RLock()
defer s.RUnlock()
return s.sampler.IsSampled(id, operation)
}
// Close implements Close() of Sampler.
func (s *RemotelyControlledSampler) Close() {
if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
s.logger.Error("Repeated attempt to close the sampler is ignored")
return
}
var wg sync.WaitGroup
wg.Add(1)
s.doneChan <- &wg
wg.Wait()
}
// Equal implements Equal() of Sampler.
func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
// more information.
if o, ok := other.(*RemotelyControlledSampler); ok {
s.RLock()
o.RLock()
defer s.RUnlock()
defer o.RUnlock()
return s.sampler.Equal(o.sampler)
}
return false
}
func (s *RemotelyControlledSampler) pollController() {
ticker := time.NewTicker(s.samplingRefreshInterval)
defer ticker.Stop()
s.pollControllerWithTicker(ticker)
}
func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
for {
select {
case <-ticker.C:
s.updateSampler()
case wg := <-s.doneChan:
wg.Done()
return
}
}
}
func (s *RemotelyControlledSampler) getSampler() Sampler {
s.Lock()
defer s.Unlock()
return s.sampler
}
func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
s.Lock()
defer s.Unlock()
s.sampler = sampler
}
func (s *RemotelyControlledSampler) updateSampler() {
res, err := s.manager.GetSamplingStrategy(s.serviceName)
if err != nil {
s.metrics.SamplerQueryFailure.Inc(1)
return
}
s.Lock()
defer s.Unlock()
s.metrics.SamplerRetrieved.Inc(1)
if strategies := res.GetOperationSampling(); strategies != nil {
s.updateAdaptiveSampler(strategies)
} else {
err = s.updateRateLimitingOrProbabilisticSampler(res)
}
if err != nil {
s.metrics.SamplerUpdateFailure.Inc(1)
s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
return
}
s.metrics.SamplerUpdated.Inc(1)
}
// NB: this function should only be called while holding a Write lock
func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
adaptiveSampler.update(strategies)
} else {
s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
}
}
// NB: this function should only be called while holding a Write lock
func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
var newSampler Sampler
if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
} else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
} else {
return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
}
if !s.sampler.Equal(newSampler) {
s.sampler = newSampler
}
return nil
}
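
A quick orientation note (not part of the vendored file): every sampler in this file, including `RemotelyControlledSampler`, exposes the same three methods — `IsSampled()`, `Close()` and `Equal()` — which appears to be the `Sampler` contract the rest of the package delegates to. A minimal sketch of a custom sampler built against that contract follows; the `alwaysOnSampler` type is hypothetical and the `TraceID` field it uses is assumed from elsewhere in the package.

```
// Minimal sketch only: an always-on Sampler, assuming the exported
// jaeger.Sampler, jaeger.TraceID and jaeger.Tag types from this package.
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

type alwaysOnSampler struct{}

// IsSampled samples every trace and attaches no decision tags.
func (alwaysOnSampler) IsSampled(id jaeger.TraceID, operation string) (bool, []jaeger.Tag) {
	return true, nil
}

// Close has nothing to release for this sampler.
func (alwaysOnSampler) Close() {}

// Equal reports whether the other sampler is also an alwaysOnSampler.
func (alwaysOnSampler) Equal(other jaeger.Sampler) bool {
	_, ok := other.(alwaysOnSampler)
	return ok
}

func main() {
	var s jaeger.Sampler = alwaysOnSampler{}
	// TraceID's Low field is assumed (the type is defined outside this hunk).
	sampled, tags := s.IsSampled(jaeger.TraceID{Low: 1}, "kata-runtime create")
	fmt.Println(sampled, tags)
}
```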


@@ -0,0 +1,81 @@
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"time"
)
// SamplerOption is a function that sets some option on the sampler
type SamplerOption func(options *samplerOptions)
// SamplerOptions is a factory for all available SamplerOption's
var SamplerOptions samplerOptions
type samplerOptions struct {
metrics *Metrics
maxOperations int
sampler Sampler
logger Logger
samplingServerURL string
samplingRefreshInterval time.Duration
}
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
// which is used to emit statistics.
func (samplerOptions) Metrics(m *Metrics) SamplerOption {
return func(o *samplerOptions) {
o.metrics = m
}
}
// MaxOperations creates a SamplerOption that sets the maximum number of
// operations the sampler will keep track of.
func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
return func(o *samplerOptions) {
o.maxOperations = maxOperations
}
}
// InitialSampler creates a SamplerOption that sets the initial sampler
// to use before a remote sampler is created and used.
func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
return func(o *samplerOptions) {
o.sampler = sampler
}
}
// Logger creates a SamplerOption that sets the logger used by the sampler.
func (samplerOptions) Logger(logger Logger) SamplerOption {
return func(o *samplerOptions) {
o.logger = logger
}
}
// SamplingServerURL creates a SamplerOption that sets the sampling server url
// of the local agent that contains the sampling strategies.
func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption {
return func(o *samplerOptions) {
o.samplingServerURL = samplingServerURL
}
}
// SamplingRefreshInterval creates a SamplerOption that sets how often the
// sampler will poll local agent for the appropriate sampling strategy.
func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
return func(o *samplerOptions) {
o.samplingRefreshInterval = samplingRefreshInterval
}
}
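
The setters above follow the functional-options pattern: each returns a closure over the unexported `samplerOptions` struct, and `applySamplerOptions()` (in `sampler.go` above) fills in defaults for anything left unset. A hedged sketch of combining them with `NewRemotelyControlledSampler()` follows; the agent URL is a placeholder and `NewConstSampler` is assumed from elsewhere in the package.

```
// Sketch only: wiring the sampler options into the remote sampler.
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	sampler := jaeger.NewRemotelyControlledSampler(
		"kata-runtime",
		// Placeholder jaeger-agent sampling endpoint.
		jaeger.SamplerOptions.SamplingServerURL("http://127.0.0.1:5778/sampling"),
		// Poll for new strategies every 30 seconds.
		jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
		// Until the first successful poll, sample everything.
		// (NewConstSampler is assumed; it is not part of this hunk.)
		jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(true)),
		// Track per-operation strategies for at most 100 distinct operations.
		jaeger.SamplerOptions.MaxOperations(100),
	)
	// Close stops the background polling goroutine started by the constructor.
	defer sampler.Close()
}
```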

249
vendor/github.com/uber/jaeger-client-go/span.go generated vendored Normal file

@@ -0,0 +1,249 @@
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jaeger
import (
"sync"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
)
// Span implements opentracing.Span
type Span struct {
sync.RWMutex
tracer *Tracer
context SpanContext
// The name of the "operation" this span is an instance of.
// Known as a "span name" in some implementations.
operationName string
// firstInProcess, if true, indicates that this span is the root of the (sub)tree
// of spans in the current process. In other words it's true for the root spans,
// and the ingress spans when the process joins another trace.
firstInProcess bool
// startTime is the timestamp indicating when the span began, with microseconds precision.
startTime time.Time
// duration returns duration of the span with microseconds precision.
// Zero value means duration is unknown.
duration time.Duration
// tags attached to this span
tags []Tag
// The span's "micro-log"
logs []opentracing.LogRecord
// references for this span
references []Reference
observer ContribSpanObserver
}
// Tag is a simple key value wrapper.
// TODO deprecate in the next major release, use opentracing.Tag instead.
type Tag struct {
key string
value interface{}
}
// SetOperationName sets or changes the operation name.
func (s *Span) SetOperationName(operationName string) opentracing.Span {
s.Lock()
defer s.Unlock()
if s.context.IsSampled() {
s.operationName = operationName
}
s.observer.OnSetOperationName(operationName)
return s
}
// SetTag implements SetTag() of opentracing.Span
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
s.observer.OnSetTag(key, value)
if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
return s
}
s.Lock()
defer s.Unlock()
if s.context.IsSampled() {
s.setTagNoLocking(key, value)
}
return s
}
func (s *Span) setTagNoLocking(key string, value interface{}) {
s.tags = append(s.tags, Tag{key: key, value: value})
}
// LogFields implements opentracing.Span API
func (s *Span) LogFields(fields ...log.Field) {
s.Lock()
defer s.Unlock()
if !s.context.IsSampled() {
return
}
s.logFieldsNoLocking(fields...)
}
// this function should only be called while holding a Write lock
func (s *Span) logFieldsNoLocking(fields ...log.Field) {
lr := opentracing.LogRecord{
Fields: fields,
Timestamp: time.Now(),
}
s.appendLog(lr)
}
// LogKV implements opentracing.Span API
func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
s.RLock()
sampled := s.context.IsSampled()
s.RUnlock()
if !sampled {
return
}
fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
if err != nil {
s.LogFields(log.Error(err), log.String("function", "LogKV"))
return
}
s.LogFields(fields...)
}
// LogEvent implements opentracing.Span API
func (s *Span) LogEvent(event string) {
s.Log(opentracing.LogData{Event: event})
}
// LogEventWithPayload implements opentracing.Span API
func (s *Span) LogEventWithPayload(event string, payload interface{}) {
s.Log(opentracing.LogData{Event: event, Payload: payload})
}
// Log implements opentracing.Span API
func (s *Span) Log(ld opentracing.LogData) {
s.Lock()
defer s.Unlock()
if s.context.IsSampled() {
if ld.Timestamp.IsZero() {
ld.Timestamp = s.tracer.timeNow()
}
s.appendLog(ld.ToLogRecord())
}
}
// this function should only be called while holding a Write lock
func (s *Span) appendLog(lr opentracing.LogRecord) {
// TODO add logic to limit number of logs per span (issue #46)
s.logs = append(s.logs, lr)
}
// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
s.Lock()
defer s.Unlock()
s.tracer.setBaggage(s, key, value)
return s
}
// BaggageItem implements BaggageItem() of opentracing.SpanContext
func (s *Span) BaggageItem(key string) string {
s.RLock()
defer s.RUnlock()
return s.context.baggage[key]
}
// Finish implements opentracing.Span API
func (s *Span) Finish() {
s.FinishWithOptions(opentracing.FinishOptions{})
}
// FinishWithOptions implements opentracing.Span API
func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
if options.FinishTime.IsZero() {
options.FinishTime = s.tracer.timeNow()
}
s.observer.OnFinish(options)
s.Lock()
if s.context.IsSampled() {
s.duration = options.FinishTime.Sub(s.startTime)
// Note: bulk logs are not subject to maxLogsPerSpan limit
if options.LogRecords != nil {
s.logs = append(s.logs, options.LogRecords...)
}
for _, ld := range options.BulkLogData {
s.logs = append(s.logs, ld.ToLogRecord())
}
}
s.Unlock()
// call reportSpan even for non-sampled traces, to return span to the pool
s.tracer.reportSpan(s)
}
// Context implements opentracing.Span API
func (s *Span) Context() opentracing.SpanContext {
s.Lock()
defer s.Unlock()
return s.context
}
// Tracer implements opentracing.Span API
func (s *Span) Tracer() opentracing.Tracer {
return s.tracer
}
func (s *Span) String() string {
s.RLock()
defer s.RUnlock()
return s.context.String()
}
// OperationName allows retrieving current operation name.
func (s *Span) OperationName() string {
s.RLock()
defer s.RUnlock()
return s.operationName
}
func (s *Span) serviceName() string {
return s.tracer.serviceName
}
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
func setSamplingPriority(s *Span, value interface{}) bool {
s.Lock()
defer s.Unlock()
val, ok := value.(uint16)
if !ok {
return false
}
if val == 0 {
s.context.flags = s.context.flags & (^flagSampled)
return true
}
if s.tracer.isDebugAllowed(s.operationName) {
s.context.flags = s.context.flags | flagDebug | flagSampled
return true
}
return false
}
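
For context, the methods above are what run when a caller uses the standard opentracing span API. A hedged usage sketch follows; `NewTracer`, `NewConstSampler` and `NewNullReporter` are assumed from other files in this package (they are not part of this hunk).

```
// Sketch only: exercising the Span implementation above via opentracing calls.
package main

import (
	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// NewTracer, NewConstSampler and NewNullReporter are assumed here.
	tracer, closer := jaeger.NewTracer(
		"kata-runtime",
		jaeger.NewConstSampler(true), // sample every span
		jaeger.NewNullReporter(),     // discard finished spans
	)
	defer closer.Close()

	span := tracer.StartSpan("kata-runtime create")
	span.SetTag("container-id", "example")          // appended via setTagNoLocking
	span.LogKV("event", "vm started")               // converted to log fields internally
	span.LogFields(log.String("source", "runtime")) // appended to the span's micro-log
	span.SetBaggageItem("session", "demo")          // baggage intended for descendant spans
	span.Finish()                                   // records duration and reports the span
}
```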


@@ -0,0 +1,411 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package agent
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
type Agent interface {
// Parameters:
// - Spans
EmitZipkinBatch(spans []*zipkincore.Span) (err error)
// Parameters:
// - Batch
EmitBatch(batch *jaeger.Batch) (err error)
}
type AgentClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
return &AgentClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
return &AgentClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// Parameters:
// - Spans
func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) {
if err = p.sendEmitZipkinBatch(spans); err != nil {
return
}
return
}
func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil {
return
}
args := AgentEmitZipkinBatchArgs{
Spans: spans,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
// Parameters:
// - Batch
func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) {
if err = p.sendEmitBatch(batch); err != nil {
return
}
return
}
func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
return
}
args := AgentEmitBatchArgs{
Batch: batch,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
type AgentProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler Agent
}
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewAgentProcessor(handler Agent) *AgentProcessor {
self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
return self0
}
func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x1.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x1
}
type agentProcessorEmitZipkinBatch struct {
handler Agent
}
func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitZipkinBatchArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
return false, err
}
iprot.ReadMessageEnd()
var err2 error
if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil {
return true, err2
}
return true, nil
}
type agentProcessorEmitBatch struct {
handler Agent
}
func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitBatchArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
return false, err
}
iprot.ReadMessageEnd()
var err2 error
if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
return true, err2
}
return true, nil
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Spans
type AgentEmitZipkinBatchArgs struct {
Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"`
}
func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
return &AgentEmitZipkinBatchArgs{}
}
func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
return p.Spans
}
func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*zipkincore.Span, 0, size)
p.Spans = tSlice
for i := 0; i < size; i++ {
_elem2 := &zipkincore.Span{}
if err := _elem2.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
}
p.Spans = append(p.Spans, _elem2)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Spans {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
}
return err
}
func (p *AgentEmitZipkinBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
}
// Attributes:
// - Batch
type AgentEmitBatchArgs struct {
Batch *jaeger.Batch `thrift:"batch,1" json:"batch"`
}
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
return &AgentEmitBatchArgs{}
}
var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
if !p.IsSetBatch() {
return AgentEmitBatchArgs_Batch_DEFAULT
}
return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
return p.Batch != nil
}
func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
p.Batch = &jaeger.Batch{}
if err := p.Batch.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
}
return nil
}
func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
}
if err := p.Batch.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
}
return err
}
func (p *AgentEmitBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}
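
For orientation: the generated `Agent` interface above is normally backed by the real jaeger-agent over UDP, but any in-process implementation can be plugged into `NewAgentProcessor()`. A small hedged sketch follows; the `countingAgent` type is hypothetical.

```
// Sketch only: a hypothetical in-process Agent handler wired into the
// generated processor.
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/agent"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// countingAgent counts what it receives instead of forwarding spans.
type countingAgent struct {
	zipkinSpans, batches int
}

func (a *countingAgent) EmitZipkinBatch(spans []*zipkincore.Span) error {
	a.zipkinSpans += len(spans)
	return nil
}

func (a *countingAgent) EmitBatch(batch *jaeger.Batch) error {
	a.batches++
	return nil
}

func main() {
	// The processor dispatches incoming "emitZipkinBatch"/"emitBatch"
	// thrift messages to the handler's methods.
	processor := agent.NewAgentProcessor(&countingAgent{})
	fmt.Println(len(processor.ProcessorMap()), "registered agent methods")
}
```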


@@ -0,0 +1,23 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package agent
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
func init() {
}


@@ -0,0 +1,21 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package agent
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var _ = jaeger.GoUnusedProtection__
var _ = zipkincore.GoUnusedProtection__
var GoUnusedProtection__ int


@@ -0,0 +1,435 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package baggage
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
type BaggageRestrictionManager interface {
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
// Usually, baggageRestrictions apply to all services; however, there may be situations
// where a baggageKey might only be allowed to be set by a specific service.
//
// Parameters:
// - ServiceName
GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error)
}
type BaggageRestrictionManagerClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
return &BaggageRestrictionManagerClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
return &BaggageRestrictionManagerClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
// Usually, baggageRestrictions apply to all services; however, there may be situations
// where a baggageKey might only be allowed to be set by a specific service.
//
// Parameters:
// - ServiceName
func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) {
if err = p.sendGetBaggageRestrictions(serviceName); err != nil {
return
}
return p.recvGetBaggageRestrictions()
}
func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil {
return
}
args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{
ServiceName: serviceName,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.InputProtocol = iprot
}
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
if method != "getBaggageRestrictions" {
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name")
return
}
if p.SeqId != seqId {
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
var error1 error
error1, err = error0.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
err = error1
return
}
if mTypeId != thrift.REPLY {
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type")
return
}
result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
if err = result.Read(iprot); err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
value = result.GetSuccess()
return
}
type BaggageRestrictionManagerProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler BaggageRestrictionManager
}
func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
return self2
}
func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x3.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x3
}
type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
handler BaggageRestrictionManager
}
func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, err
}
iprot.ReadMessageEnd()
result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
var retval []*BaggageRestriction
var err2 error
if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil {
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error())
oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
} else {
result.Success = retval
}
if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.Flush(); err == nil && err2 != nil {
err = err2
}
if err != nil {
return
}
return true, err
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - ServiceName
type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
ServiceName string `thrift:"serviceName,1" json:"serviceName"`
}
func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
return p.ServiceName
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.ServiceName = v
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
}
if err := oprot.WriteString(string(p.ServiceName)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
}
return err
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
}
// Attributes:
// - Success
type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"`
}
func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
}
var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
return p.Success
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
return p.Success != nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 0:
if err := p.readField0(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*BaggageRestriction, 0, size)
p.Success = tSlice
for i := 0; i < size; i++ {
_elem4 := &BaggageRestriction{}
if err := _elem4.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
}
p.Success = append(p.Success, _elem4)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField0(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Success {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
}
}
return err
}
func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
}


@@ -0,0 +1,18 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package baggage
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
func init() {
}


@@ -0,0 +1,154 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package baggage
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var GoUnusedProtection__ int
// Attributes:
// - BaggageKey
// - MaxValueLength
type BaggageRestriction struct {
BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"`
MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"`
}
func NewBaggageRestriction() *BaggageRestriction {
return &BaggageRestriction{}
}
func (p *BaggageRestriction) GetBaggageKey() string {
return p.BaggageKey
}
func (p *BaggageRestriction) GetMaxValueLength() int32 {
return p.MaxValueLength
}
func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetBaggageKey bool = false
var issetMaxValueLength bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetBaggageKey = true
case 2:
if err := p.readField2(iprot); err != nil {
return err
}
issetMaxValueLength = true
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetBaggageKey {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"))
}
if !issetMaxValueLength {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"))
}
return nil
}
func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.BaggageKey = v
}
return nil
}
func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error {
if v, err := iprot.ReadI32(); err != nil {
return thrift.PrependError("error reading field 2: ", err)
} else {
p.MaxValueLength = v
}
return nil
}
func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := p.writeField2(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err)
}
if err := oprot.WriteString(string(p.BaggageKey)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err)
}
return err
}
func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err)
}
if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err)
}
return err
}
func (p *BaggageRestriction) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BaggageRestriction(%+v)", *p)
}
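
Putting the two generated pieces together: `GetBaggageRestrictions()` (interface above) returns the `BaggageRestriction` structs defined in this file. A hedged sketch of a trivial in-process implementation follows; the `fixedRestrictionManager` type and its values are hypothetical.

```
// Sketch only: a hypothetical BaggageRestrictionManager that allows a single
// baggage key with a fixed maximum value length for every service.
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

type fixedRestrictionManager struct{}

func (fixedRestrictionManager) GetBaggageRestrictions(serviceName string) ([]*baggage.BaggageRestriction, error) {
	return []*baggage.BaggageRestriction{
		{BaggageKey: "session", MaxValueLength: 64},
	}, nil
}

func main() {
	var mgr baggage.BaggageRestrictionManager = fixedRestrictionManager{}
	restrictions, err := mgr.GetBaggageRestrictions("kata-runtime")
	if err != nil {
		panic(err)
	}
	for _, r := range restrictions {
		fmt.Printf("key=%s maxValueLength=%d\n", r.GetBaggageKey(), r.GetMaxValueLength())
	}
}
```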


@@ -0,0 +1,242 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package jaeger
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
type Agent interface {
// Parameters:
// - Batch
EmitBatch(batch *Batch) (err error)
}
type AgentClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
return &AgentClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
return &AgentClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// Parameters:
// - Batch
func (p *AgentClient) EmitBatch(batch *Batch) (err error) {
if err = p.sendEmitBatch(batch); err != nil {
return
}
return
}
func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil {
return
}
args := AgentEmitBatchArgs{
Batch: batch,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
type AgentProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler Agent
}
func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewAgentProcessor(handler Agent) *AgentProcessor {
self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
return self6
}
func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x7.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x7
}
type agentProcessorEmitBatch struct {
handler Agent
}
func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := AgentEmitBatchArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
return false, err
}
iprot.ReadMessageEnd()
var err2 error
if err2 = p.handler.EmitBatch(args.Batch); err2 != nil {
return true, err2
}
return true, nil
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Batch
type AgentEmitBatchArgs struct {
Batch *Batch `thrift:"batch,1" json:"batch"`
}
func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
return &AgentEmitBatchArgs{}
}
var AgentEmitBatchArgs_Batch_DEFAULT *Batch
func (p *AgentEmitBatchArgs) GetBatch() *Batch {
if !p.IsSetBatch() {
return AgentEmitBatchArgs_Batch_DEFAULT
}
return p.Batch
}
func (p *AgentEmitBatchArgs) IsSetBatch() bool {
return p.Batch != nil
}
func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error {
p.Batch = &Batch{}
if err := p.Batch.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
}
return nil
}
func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("emitBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
}
if err := p.Batch.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
}
return err
}
func (p *AgentEmitBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
}
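
For illustration only: the generated `AgentClient` above can be driven over any `thrift.TTransport`, not just the UDP transport the reporter normally uses. The sketch below uses an in-memory buffer; `thrift.NewTMemoryBuffer`, `thrift.NewTBinaryProtocolFactoryDefault` and the `Batch`/`Process` field names are assumed from the vendored thrift fork and from the suppressed `jaeger` ttypes file, none of which appear in this hunk.

```
// Sketch only: emitting an (empty) batch through the generated client over an
// in-memory transport. Several names here are assumptions, see the note above.
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	buf := thrift.NewTMemoryBuffer() // assumed helper from the vendored thrift fork
	client := jaeger.NewAgentClientFactory(buf, thrift.NewTBinaryProtocolFactoryDefault())

	// Batch/Process fields are assumed from the jaeger.thrift IDL.
	batch := &jaeger.Batch{
		Process: &jaeger.Process{ServiceName: "kata-runtime"},
		Spans:   []*jaeger.Span{},
	}

	// EmitBatch is a oneway call: it serialises an "emitBatch" message and
	// flushes the transport without waiting for a reply.
	if err := client.EmitBatch(batch); err != nil {
		panic(err)
	}
	fmt.Println("encoded bytes:", buf.Len())
}
```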


@@ -0,0 +1,18 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package jaeger
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
func init() {
}

File diff suppressed because it is too large


@@ -0,0 +1,18 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package sampling
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
func init() {
}


@@ -0,0 +1,410 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package sampling
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
type SamplingManager interface {
// Parameters:
// - ServiceName
GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error)
}
type SamplingManagerClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
return &SamplingManagerClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
return &SamplingManagerClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// Parameters:
// - ServiceName
func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) {
if err = p.sendGetSamplingStrategy(serviceName); err != nil {
return
}
return p.recvGetSamplingStrategy()
}
func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil {
return
}
args := SamplingManagerGetSamplingStrategyArgs{
ServiceName: serviceName,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.InputProtocol = iprot
}
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
if method != "getSamplingStrategy" {
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name")
return
}
if p.SeqId != seqId {
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
var error2 error
error2, err = error1.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
err = error2
return
}
if mTypeId != thrift.REPLY {
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type")
return
}
result := SamplingManagerGetSamplingStrategyResult{}
if err = result.Read(iprot); err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
value = result.GetSuccess()
return
}
type SamplingManagerProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler SamplingManager
}
func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
return self3
}
func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x4.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x4
}
type samplingManagerProcessorGetSamplingStrategy struct {
handler SamplingManager
}
func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := SamplingManagerGetSamplingStrategyArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, err
}
iprot.ReadMessageEnd()
result := SamplingManagerGetSamplingStrategyResult{}
var retval *SamplingStrategyResponse
var err2 error
if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil {
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error())
oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
} else {
result.Success = retval
}
if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.Flush(); err == nil && err2 != nil {
err = err2
}
if err != nil {
return
}
return true, err
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - ServiceName
type SamplingManagerGetSamplingStrategyArgs struct {
ServiceName string `thrift:"serviceName,1" json:"serviceName"`
}
func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
return &SamplingManagerGetSamplingStrategyArgs{}
}
func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
return p.ServiceName
}
func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.ServiceName = v
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
}
if err := oprot.WriteString(string(p.ServiceName)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
}
return err
}
func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
}
// Attributes:
// - Success
type SamplingManagerGetSamplingStrategyResult struct {
Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"`
}
func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
return &SamplingManagerGetSamplingStrategyResult{}
}
var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
if !p.IsSetSuccess() {
return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
}
return p.Success
}
func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
return p.Success != nil
}
func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 0:
if err := p.readField0(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error {
p.Success = &SamplingStrategyResponse{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField0(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
}
if err := p.Success.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
}
}
return err
}
func (p *SamplingManagerGetSamplingStrategyResult) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
}
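
The generated service code above dispatches `getSamplingStrategy` calls to whatever handler is registered with `NewSamplingManagerProcessor`. A minimal sketch of such a handler follows, assuming the package is imported from its vendored path `github.com/uber/jaeger-client-go/thrift-gen/sampling`; the `fixedRateManager` type and its 1% rate are invented for illustration, while the other names come from the generated code shown here.

```go
package main

import (
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// fixedRateManager is a hypothetical handler used only for this sketch.
type fixedRateManager struct{}

// GetSamplingStrategy answers every service with the same fixed
// probabilistic strategy, satisfying the generated SamplingManager interface.
func (m *fixedRateManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
	return &sampling.SamplingStrategyResponse{
		StrategyType: sampling.SamplingStrategyType_PROBABILISTIC,
		ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{
			SamplingRate: 0.01,
		},
	}, nil
}

func main() {
	// Handing the processor to a thrift server is omitted; this only shows
	// that the handler plugs into the generated dispatch machinery.
	_ = sampling.NewSamplingManagerProcessor(&fixedRateManager{})
}
```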



@ -0,0 +1,873 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package sampling
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
var GoUnusedProtection__ int
type SamplingStrategyType int64
const (
SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
)
func (p SamplingStrategyType) String() string {
switch p {
case SamplingStrategyType_PROBABILISTIC:
return "PROBABILISTIC"
case SamplingStrategyType_RATE_LIMITING:
return "RATE_LIMITING"
}
return "<UNSET>"
}
func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
switch s {
case "PROBABILISTIC":
return SamplingStrategyType_PROBABILISTIC, nil
case "RATE_LIMITING":
return SamplingStrategyType_RATE_LIMITING, nil
}
return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
}
func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
func (p SamplingStrategyType) MarshalText() ([]byte, error) {
return []byte(p.String()), nil
}
func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
q, err := SamplingStrategyTypeFromString(string(text))
if err != nil {
return err
}
*p = q
return nil
}
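
For reference, the enum helpers above round-trip between the numeric `SamplingStrategyType` values and their string names. A small usage sketch, again assuming the vendored `thrift-gen/sampling` import path:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	var t sampling.SamplingStrategyType
	// UnmarshalText accepts the same strings that String() produces.
	if err := t.UnmarshalText([]byte("RATE_LIMITING")); err != nil {
		panic(err)
	}
	fmt.Println(t) // RATE_LIMITING

	// Unknown names are rejected rather than silently mapped to a default.
	if _, err := sampling.SamplingStrategyTypeFromString("bogus"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```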
// Attributes:
// - SamplingRate
type ProbabilisticSamplingStrategy struct {
SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"`
}
func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
return &ProbabilisticSamplingStrategy{}
}
func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
return p.SamplingRate
}
func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetSamplingRate bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetSamplingRate = true
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetSamplingRate {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
}
return nil
}
func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadDouble(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.SamplingRate = v
}
return nil
}
func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err)
}
if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err)
}
return err
}
func (p *ProbabilisticSamplingStrategy) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
}
// Attributes:
// - MaxTracesPerSecond
type RateLimitingSamplingStrategy struct {
MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"`
}
func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
return &RateLimitingSamplingStrategy{}
}
func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
return p.MaxTracesPerSecond
}
func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetMaxTracesPerSecond bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetMaxTracesPerSecond = true
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetMaxTracesPerSecond {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
}
return nil
}
func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadI16(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.MaxTracesPerSecond = v
}
return nil
}
func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err)
}
if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err)
}
return err
}
func (p *RateLimitingSamplingStrategy) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
}
// Attributes:
// - Operation
// - ProbabilisticSampling
type OperationSamplingStrategy struct {
Operation string `thrift:"operation,1,required" json:"operation"`
ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"`
}
func NewOperationSamplingStrategy() *OperationSamplingStrategy {
return &OperationSamplingStrategy{}
}
func (p *OperationSamplingStrategy) GetOperation() string {
return p.Operation
}
var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
if !p.IsSetProbabilisticSampling() {
return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
}
return p.ProbabilisticSampling
}
func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
return p.ProbabilisticSampling != nil
}
func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetOperation bool = false
var issetProbabilisticSampling bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetOperation = true
case 2:
if err := p.readField2(iprot); err != nil {
return err
}
issetProbabilisticSampling = true
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetOperation {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
}
if !issetProbabilisticSampling {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
}
return nil
}
func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.Operation = v
}
return nil
}
func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error {
p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
if err := p.ProbabilisticSampling.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
}
return nil
}
func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := p.writeField2(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
}
if err := oprot.WriteString(string(p.Operation)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
}
return err
}
func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
}
if err := p.ProbabilisticSampling.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
}
return err
}
func (p *OperationSamplingStrategy) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
}
// Attributes:
// - DefaultSamplingProbability
// - DefaultLowerBoundTracesPerSecond
// - PerOperationStrategies
// - DefaultUpperBoundTracesPerSecond
type PerOperationSamplingStrategies struct {
DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"`
DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"`
PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"`
DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"`
}
func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
return &PerOperationSamplingStrategies{}
}
func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
return p.DefaultSamplingProbability
}
func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
return p.DefaultLowerBoundTracesPerSecond
}
func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
return p.PerOperationStrategies
}
var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
if !p.IsSetDefaultUpperBoundTracesPerSecond() {
return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
}
return *p.DefaultUpperBoundTracesPerSecond
}
func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
return p.DefaultUpperBoundTracesPerSecond != nil
}
func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetDefaultSamplingProbability bool = false
var issetDefaultLowerBoundTracesPerSecond bool = false
var issetPerOperationStrategies bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetDefaultSamplingProbability = true
case 2:
if err := p.readField2(iprot); err != nil {
return err
}
issetDefaultLowerBoundTracesPerSecond = true
case 3:
if err := p.readField3(iprot); err != nil {
return err
}
issetPerOperationStrategies = true
case 4:
if err := p.readField4(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetDefaultSamplingProbability {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
}
if !issetDefaultLowerBoundTracesPerSecond {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
}
if !issetPerOperationStrategies {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
}
return nil
}
func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadDouble(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
p.DefaultSamplingProbability = v
}
return nil
}
func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error {
if v, err := iprot.ReadDouble(); err != nil {
return thrift.PrependError("error reading field 2: ", err)
} else {
p.DefaultLowerBoundTracesPerSecond = v
}
return nil
}
func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*OperationSamplingStrategy, 0, size)
p.PerOperationStrategies = tSlice
for i := 0; i < size; i++ {
_elem0 := &OperationSamplingStrategy{}
if err := _elem0.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
}
p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error {
if v, err := iprot.ReadDouble(); err != nil {
return thrift.PrependError("error reading field 4: ", err)
} else {
p.DefaultUpperBoundTracesPerSecond = &v
}
return nil
}
func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := p.writeField2(oprot); err != nil {
return err
}
if err := p.writeField3(oprot); err != nil {
return err
}
if err := p.writeField4(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err)
}
if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err)
}
return err
}
func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err)
}
if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err)
}
return err
}
func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.PerOperationStrategies {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err)
}
return err
}
func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) {
if p.IsSetDefaultUpperBoundTracesPerSecond() {
if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err)
}
if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err)
}
}
return err
}
func (p *PerOperationSamplingStrategies) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
}
// Attributes:
// - StrategyType
// - ProbabilisticSampling
// - RateLimitingSampling
// - OperationSampling
type SamplingStrategyResponse struct {
StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"`
ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"`
RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"`
OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"`
}
func NewSamplingStrategyResponse() *SamplingStrategyResponse {
return &SamplingStrategyResponse{}
}
func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
return p.StrategyType
}
var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
if !p.IsSetProbabilisticSampling() {
return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
}
return p.ProbabilisticSampling
}
var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
if !p.IsSetRateLimitingSampling() {
return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
}
return p.RateLimitingSampling
}
var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
if !p.IsSetOperationSampling() {
return SamplingStrategyResponse_OperationSampling_DEFAULT
}
return p.OperationSampling
}
func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
return p.ProbabilisticSampling != nil
}
func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
return p.RateLimitingSampling != nil
}
func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
return p.OperationSampling != nil
}
func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
var issetStrategyType bool = false
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
issetStrategyType = true
case 2:
if err := p.readField2(iprot); err != nil {
return err
}
case 3:
if err := p.readField3(iprot); err != nil {
return err
}
case 4:
if err := p.readField4(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
if !issetStrategyType {
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
}
return nil
}
func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadI32(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
temp := SamplingStrategyType(v)
p.StrategyType = temp
}
return nil
}
func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error {
p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
if err := p.ProbabilisticSampling.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
}
return nil
}
func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error {
p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
if err := p.RateLimitingSampling.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
}
return nil
}
func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error {
p.OperationSampling = &PerOperationSamplingStrategies{}
if err := p.OperationSampling.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
}
return nil
}
func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := p.writeField2(oprot); err != nil {
return err
}
if err := p.writeField3(oprot); err != nil {
return err
}
if err := p.writeField4(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err)
}
if err := oprot.WriteI32(int32(p.StrategyType)); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err)
}
return err
}
func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) {
if p.IsSetProbabilisticSampling() {
if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err)
}
if err := p.ProbabilisticSampling.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err)
}
}
return err
}
func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) {
if p.IsSetRateLimitingSampling() {
if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err)
}
if err := p.RateLimitingSampling.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err)
}
}
return err
}
func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) {
if p.IsSetOperationSampling() {
if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err)
}
if err := p.OperationSampling.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err)
}
}
return err
}
func (p *SamplingStrategyResponse) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
}
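
Putting the structs above together, a response carrying per-operation strategies might be assembled as below; every field value is invented purely for illustration, and only the type and field names come from the generated code.

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	upper := 100.0 // optional field, so it is passed by pointer
	resp := &sampling.SamplingStrategyResponse{
		StrategyType: sampling.SamplingStrategyType_PROBABILISTIC,
		OperationSampling: &sampling.PerOperationSamplingStrategies{
			DefaultSamplingProbability:       0.001,
			DefaultLowerBoundTracesPerSecond: 1,
			DefaultUpperBoundTracesPerSecond: &upper,
			PerOperationStrategies: []*sampling.OperationSamplingStrategy{
				{
					Operation: "kata-runtime create",
					ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{
						SamplingRate: 1.0,
					},
				},
			},
		},
	}
	// String() gives a readable dump of the populated strategy tree.
	fmt.Println(resp)
}
```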


@ -0,0 +1,32 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package zipkincore
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
const CLIENT_SEND = "cs"
const CLIENT_RECV = "cr"
const SERVER_SEND = "ss"
const SERVER_RECV = "sr"
const WIRE_SEND = "ws"
const WIRE_RECV = "wr"
const CLIENT_SEND_FRAGMENT = "csf"
const CLIENT_RECV_FRAGMENT = "crf"
const SERVER_SEND_FRAGMENT = "ssf"
const SERVER_RECV_FRAGMENT = "srf"
const LOCAL_COMPONENT = "lc"
const CLIENT_ADDR = "ca"
const SERVER_ADDR = "sa"
func init() {
}
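
These constants are the conventional Zipkin annotation values (client, server and wire send/receive, plus the fragment, local-component and address variants). A tiny, self-contained sketch mapping a few of them to their usual meanings; the map itself is purely illustrative and not part of the generated package.

```go
package main

import "fmt"

// zipkinAnnotationMeaning spells out the conventional reading of some of the
// zipkincore constants above. Illustrative only.
var zipkinAnnotationMeaning = map[string]string{
	"cs": "client send",
	"cr": "client receive",
	"ss": "server send",
	"sr": "server receive",
	"ws": "wire send",
	"wr": "wire receive",
}

func main() {
	for value, meaning := range zipkinAnnotationMeaning {
		fmt.Printf("%s => %s\n", value, meaning)
	}
}
```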

File diff suppressed because it is too large


@ -0,0 +1,446 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package zipkincore
import (
"bytes"
"fmt"
"github.com/uber/jaeger-client-go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
type ZipkinCollector interface {
// Parameters:
// - Spans
SubmitZipkinBatch(spans []*Span) (r []*Response, err error)
}
type ZipkinCollectorClient struct {
Transport thrift.TTransport
ProtocolFactory thrift.TProtocolFactory
InputProtocol thrift.TProtocol
OutputProtocol thrift.TProtocol
SeqId int32
}
func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
return &ZipkinCollectorClient{Transport: t,
ProtocolFactory: f,
InputProtocol: f.GetProtocol(t),
OutputProtocol: f.GetProtocol(t),
SeqId: 0,
}
}
func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
return &ZipkinCollectorClient{Transport: t,
ProtocolFactory: nil,
InputProtocol: iprot,
OutputProtocol: oprot,
SeqId: 0,
}
}
// Parameters:
// - Spans
func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) {
if err = p.sendSubmitZipkinBatch(spans); err != nil {
return
}
return p.recvSubmitZipkinBatch()
}
func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil {
return
}
args := ZipkinCollectorSubmitZipkinBatchArgs{
Spans: spans,
}
if err = args.Write(oprot); err != nil {
return
}
if err = oprot.WriteMessageEnd(); err != nil {
return
}
return oprot.Flush()
}
func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.InputProtocol = iprot
}
method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
if method != "submitZipkinBatch" {
err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name")
return
}
if p.SeqId != seqId {
err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
var error3 error
error3, err = error2.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
err = error3
return
}
if mTypeId != thrift.REPLY {
err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type")
return
}
result := ZipkinCollectorSubmitZipkinBatchResult{}
if err = result.Read(iprot); err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
value = result.GetSuccess()
return
}
type ZipkinCollectorProcessor struct {
processorMap map[string]thrift.TProcessorFunction
handler ZipkinCollector
}
func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
p.processorMap[key] = processor
}
func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
processor, ok = p.processorMap[key]
return processor, ok
}
func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
return p.processorMap
}
func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
return self4
}
func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
name, _, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return false, err
}
if processor, ok := p.GetProcessorFunction(name); ok {
return processor.Process(seqId, iprot, oprot)
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
x5.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, x5
}
type zipkinCollectorProcessorSubmitZipkinBatch struct {
handler ZipkinCollector
}
func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
args := ZipkinCollectorSubmitZipkinBatchArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return false, err
}
iprot.ReadMessageEnd()
result := ZipkinCollectorSubmitZipkinBatchResult{}
var retval []*Response
var err2 error
if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil {
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
} else {
result.Success = retval
}
if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
err = err2
}
if err2 = oprot.Flush(); err == nil && err2 != nil {
err = err2
}
if err != nil {
return
}
return true, err
}
// HELPER FUNCTIONS AND STRUCTURES
// Attributes:
// - Spans
type ZipkinCollectorSubmitZipkinBatchArgs struct {
Spans []*Span `thrift:"spans,1" json:"spans"`
}
func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
return &ZipkinCollectorSubmitZipkinBatchArgs{}
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
return p.Spans
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 1:
if err := p.readField1(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*Span, 0, size)
p.Spans = tSlice
for i := 0; i < size; i++ {
_elem6 := &Span{}
if err := _elem6.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err)
}
p.Spans = append(p.Spans, _elem6)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField1(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Spans {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
}
return err
}
func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
}
// Attributes:
// - Success
type ZipkinCollectorSubmitZipkinBatchResult struct {
Success []*Response `thrift:"success,0" json:"success,omitempty"`
}
func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
return &ZipkinCollectorSubmitZipkinBatchResult{}
}
var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
return p.Success
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
return p.Success != nil
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
for {
_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
if fieldTypeId == thrift.STOP {
break
}
switch fieldId {
case 0:
if err := p.readField0(iprot); err != nil {
return err
}
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
}
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return thrift.PrependError("error reading list begin: ", err)
}
tSlice := make([]*Response, 0, size)
p.Success = tSlice
for i := 0; i < size; i++ {
_elem7 := &Response{}
if err := _elem7.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err)
}
p.Success = append(p.Success, _elem7)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if err := p.writeField0(oprot); err != nil {
return err
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
if err := oprot.WriteStructEnd(); err != nil {
return thrift.PrependError("write struct stop error: ", err)
}
return nil
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
}
if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.Success {
if err := v.Write(oprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
}
}
if err := oprot.WriteListEnd(); err != nil {
return thrift.PrependError("error writing list end: ", err)
}
if err := oprot.WriteFieldEnd(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
}
}
return err
}
func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
}
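
The client half of this generated service is driven through `NewZipkinCollectorClientFactory` and `SubmitZipkinBatch`. A hedged sketch follows, assuming the vendored `thrift-gen/zipkincore` import path; `submitEmptyBatch` is a hypothetical helper, and the transport and protocol factory are left to the caller because their construction depends on how the agent is reached.

```go
package main

import (
	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

// submitEmptyBatch sends a single zero-value span and returns the collector's
// per-span responses. Real callers would populate trace/span IDs, names and
// annotations on the Span before submitting it.
func submitEmptyBatch(t thrift.TTransport, f thrift.TProtocolFactory) ([]*zipkincore.Response, error) {
	client := zipkincore.NewZipkinCollectorClientFactory(t, f)
	return client.SubmitZipkinBatch([]*zipkincore.Span{{}})
}

func main() {}
```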

Some files were not shown because too many files have changed in this diff