Removed old mesos deps.

Guangya Liu 2017-07-10 03:48:15 -04:00
parent 498b034492
commit 9ef82665ab
44 changed files with 72 additions and 67075 deletions

Godeps/Godeps.json (generated, 166 lines changed)

@@ -415,7 +415,7 @@
},
{
"ImportPath": "github.com/codegangsta/negroni",
"Comment": "v0.1-62-g8d75e11",
"Comment": "v0.1.0-62-g8d75e11",
"Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
},
{
@@ -827,12 +827,12 @@
},
{
"ImportPath": "github.com/docker/distribution/digest",
"Comment": "v2.4.0-rc.1-38-gcd27f179",
"Comment": "v2.4.0-rc.1-38-gcd27f17",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
},
{
"ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.4.0-rc.1-38-gcd27f179",
"Comment": "v2.4.0-rc.1-38-gcd27f17",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
},
{
@@ -1080,127 +1080,127 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/compare",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/description",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/embedcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/enumstringer",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/equal",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/face",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/gostring",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/marshalto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/populate",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/size",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/stringer",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/testgen",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/union",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/plugin/unmarshal",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/sortkeys",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
"ImportPath": "github.com/gogo/protobuf/vanity/command",
"Comment": "v0.4-3-gc0656edd",
"Comment": "v0.4-3-gc0656ed",
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
},
{
@@ -1824,31 +1824,6 @@
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
},
{
"ImportPath": "github.com/mesos/mesos-go/detector",
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/detector/zoo",
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/mesosproto",
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/mesosutil",
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/mesos/mesos-go/upid",
"Comment": "before-0.26-protos-33-g45c8b08",
"Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
},
{
"ImportPath": "github.com/miekg/coredns/middleware/etcd/msg",
"Comment": "v003",
@@ -2081,82 +2056,82 @@
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/keys",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/label",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/selinux",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
"ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
"Comment": "v1.0.0-rc2-49-gd223e2a",
"Rev": "d223e2adae83f62d58448a799a5da05730228089"
},
{
@@ -2165,6 +2140,7 @@
},
{
"ImportPath": "github.com/pelletier/go-buffruneio",
"Comment": "v0.1.0",
"Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d"
},
{
@@ -2226,107 +2202,107 @@
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/openstack/utils",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/pagination",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/servers",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
"ImportPath": "github.com/rackspace/gophercloud/testhelper/client",
"Comment": "v1.0.0-1012-ge00690e8",
"Comment": "v1.0.0-1012-ge00690e",
"Rev": "e00690e87603abe613e9f02c816c7c4bef82e063"
},
{
@@ -2353,10 +2329,6 @@
"Comment": "v1.4-2-g300106c",
"Rev": "300106c228d52c8941d4b3de6054a6062a86dda3"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "177002e16a0061912f02377e2dd8951a8b3551bc"
},
{
"ImportPath": "github.com/seccomp/libseccomp-golang",
"Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1"
@@ -2572,6 +2544,7 @@
},
{
"ImportPath": "github.com/xiang90/probing",
"Comment": "0.0.1",
"Rev": "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2"
},
{
@@ -2878,22 +2851,27 @@
},
{
"ImportPath": "gopkg.in/gcfg.v1",
"Comment": "v1.0.0",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
},
{
"ImportPath": "gopkg.in/gcfg.v1/scanner",
"Comment": "v1.0.0",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
},
{
"ImportPath": "gopkg.in/gcfg.v1/token",
"Comment": "v1.0.0",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
},
{
"ImportPath": "gopkg.in/gcfg.v1/types",
"Comment": "v1.0.0",
"Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
},
{
"ImportPath": "gopkg.in/inf.v0",
"Comment": "v0.9.0",
"Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
},
{

Godeps/LICENSES (generated, 1073 lines changed)
File diff suppressed because it is too large.

vendor/BUILD (vendored, 5 lines changed)

@@ -258,10 +258,6 @@ filegroup(
"//vendor/github.com/mailru/easyjson/jlexer:all-srcs",
"//vendor/github.com/mailru/easyjson/jwriter:all-srcs",
"//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:all-srcs",
"//vendor/github.com/mesos/mesos-go/detector:all-srcs",
"//vendor/github.com/mesos/mesos-go/mesosproto:all-srcs",
"//vendor/github.com/mesos/mesos-go/mesosutil:all-srcs",
"//vendor/github.com/mesos/mesos-go/upid:all-srcs",
"//vendor/github.com/miekg/coredns/middleware/etcd/msg:all-srcs",
"//vendor/github.com/miekg/dns:all-srcs",
"//vendor/github.com/mistifyio/go-zfs:all-srcs",
@@ -292,7 +288,6 @@ filegroup(
"//vendor/github.com/robfig/cron:all-srcs",
"//vendor/github.com/rubiojr/go-vhd/vhd:all-srcs",
"//vendor/github.com/russross/blackfriday:all-srcs",
"//vendor/github.com/samuel/go-zookeeper/zk:all-srcs",
"//vendor/github.com/seccomp/libseccomp-golang:all-srcs",
"//vendor/github.com/shurcooL/sanitized_anchor_name:all-srcs",
"//vendor/github.com/spf13/afero:all-srcs",


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,13 +0,0 @@
Copyright 2013-2015, Mesosphere, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,43 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"factory.go",
"interface.go",
"standalone.go",
],
tags = ["automanaged"],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/mesos/mesos-go/mesosproto:go_default_library",
"//vendor/github.com/mesos/mesos-go/mesosutil:go_default_library",
"//vendor/github.com/mesos/mesos-go/upid:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/mesos/mesos-go/detector/zoo:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,24 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
The detector package houses implementation of master detectors.
The default implementation is the zookeeper master detector.
It uses zookeeper to detect the lead Mesos master during startup/failover.
*/
package detector


@@ -1,155 +0,0 @@
package detector
import (
"encoding/binary"
"errors"
"fmt"
"io/ioutil"
"net"
"strconv"
"strings"
"sync"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
mesos "github.com/mesos/mesos-go/mesosproto"
util "github.com/mesos/mesos-go/mesosutil"
"github.com/mesos/mesos-go/upid"
)
var (
pluginLock sync.Mutex
plugins = map[string]PluginFactory{}
EmptySpecError = errors.New("empty master specification")
defaultFactory = PluginFactory(func(spec string, _ ...Option) (Master, error) {
if len(spec) == 0 {
return nil, EmptySpecError
}
if strings.Index(spec, "@") < 0 {
spec = "master@" + spec
}
if pid, err := upid.Parse(spec); err == nil {
return NewStandalone(CreateMasterInfo(pid)), nil
} else {
return nil, err
}
})
)
type PluginFactory func(string, ...Option) (Master, error)
// associates a plugin implementation with a Master specification prefix.
// packages that provide plugins are expected to invoke this func within
// their init() implementation. schedulers that wish to support plugins may
// anonymously import ("_") a package that auto-registers said plugins.
func Register(prefix string, f PluginFactory) error {
if prefix == "" {
return fmt.Errorf("illegal prefix: '%v'", prefix)
}
if f == nil {
return fmt.Errorf("nil plugin factories are not allowed")
}
pluginLock.Lock()
defer pluginLock.Unlock()
if _, found := plugins[prefix]; found {
return fmt.Errorf("detection plugin already registered for prefix '%s'", prefix)
}
plugins[prefix] = f
return nil
}
// Create a new detector given the provided specification. Examples are:
//
// - file://{path_to_local_file}
// - {ipaddress}:{port}
// - master@{ip_address}:{port}
// - master({id})@{ip_address}:{port}
//
// Support for the file:// prefix is intentionally hardcoded so that it may
// not be inadvertently overridden by a custom plugin implementation. Custom
// plugins are supported via the Register and MatchingPlugin funcs.
//
// Furthermore it is expected that master detectors returned from this func
// are not yet running and will only begin to spawn requisite background
// processing upon, or some time after, the first invocation of their Detect.
//
func New(spec string, options ...Option) (m Master, err error) {
if strings.HasPrefix(spec, "file://") {
var body []byte
path := spec[7:]
body, err = ioutil.ReadFile(path)
if err != nil {
log.V(1).Infof("failed to read from file at '%s'", path)
} else {
m, err = New(string(body), options...)
}
} else if f, ok := MatchingPlugin(spec); ok {
m, err = f(spec, options...)
} else {
m, err = defaultFactory(spec, options...)
}
return
}
func MatchingPlugin(spec string) (PluginFactory, bool) {
pluginLock.Lock()
defer pluginLock.Unlock()
for prefix, f := range plugins {
if strings.HasPrefix(spec, prefix) {
return f, true
}
}
return nil, false
}
// Super-useful utility func that attempts to build a mesos.MasterInfo from a
// upid.UPID specification. An attempt is made to determine the IP address of
// the UPID's Host and any errors during such resolution will result in a nil
// returned result. A nil result is also returned upon errors parsing the Port
// specification of the UPID.
//
// TODO(jdef) make this a func of upid.UPID so that callers can invoke somePid.MasterInfo()?
//
func CreateMasterInfo(pid *upid.UPID) *mesos.MasterInfo {
if pid == nil {
return nil
}
port, err := strconv.Atoi(pid.Port)
if err != nil {
log.Errorf("failed to parse port: %v", err)
return nil
}
//TODO(jdef) what about (future) ipv6 support?
var ipv4 net.IP
if ipv4 = net.ParseIP(pid.Host); ipv4 != nil {
// This is needed for the people cross-compiling from macos to linux.
// The cross-compiled version of net.LookupIP() fails to handle plain IPs.
// See https://github.com/mesos/mesos-go/pull/117
} else if addrs, err := net.LookupIP(pid.Host); err == nil {
for _, ip := range addrs {
if ip = ip.To4(); ip != nil {
ipv4 = ip
break
}
}
if ipv4 == nil {
log.Errorf("host does not resolve to an IPv4 address: %v", pid.Host)
return nil
}
} else {
log.Errorf("failed to lookup IPs for host '%v': %v", pid.Host, err)
return nil
}
packedip := binary.BigEndian.Uint32(ipv4) // network byte order is big-endian
mi := util.NewMasterInfo(pid.ID, packedip, uint32(port))
mi.Pid = proto.String(pid.String())
if pid.Host != "" {
mi.Hostname = proto.String(pid.Host)
}
return mi
}
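For reference, the spec formats documented on New above were consumed roughly as follows. This is a minimal sketch rather than code from the repository: it assumes the vendored mesos-go detector package is still importable, and the file path and master address in it are purely illustrative.

package main

import (
	"log"

	"github.com/mesos/mesos-go/detector"
)

func main() {
	// Any spec form documented on New() works here, for example:
	//   "file:///etc/mesos-master"   (illustrative path; the spec is read from the file)
	//   "10.0.0.1:5050"              (the "master@" prefix is assumed)
	//   "master@10.0.0.1:5050"
	m, err := detector.New("master@10.0.0.1:5050")
	if err != nil {
		log.Fatalf("failed to build master detector: %v", err)
	}
	defer m.Cancel()
	// Per the doc comment above, the detector stays idle until Detect() is first called.
}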


@@ -1,71 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package detector
import (
mesos "github.com/mesos/mesos-go/mesosproto"
)
type MasterChanged interface {
// Invoked when the master changes
OnMasterChanged(*mesos.MasterInfo)
}
// AllMasters defines an optional interface that, if implemented by the same
// struct as implements MasterChanged, will receive additional callbacks
// independently of leadership changes. it's possible that, as a result of a
// leadership change, both the OnMasterChanged and UpdatedMasters callbacks
// would be invoked.
//
// **NOTE:** Detector implementations are not required to support this optional
// interface. Please RTFM of the detector implementation that you want to use.
type AllMasters interface {
// UpdatedMasters is invoked upon a change in the membership of mesos
// masters, and is useful to clients that wish to know the entire set
// of Mesos masters currently running.
UpdatedMasters([]*mesos.MasterInfo)
}
// func/interface adapter
type OnMasterChanged func(*mesos.MasterInfo)
func (f OnMasterChanged) OnMasterChanged(mi *mesos.MasterInfo) {
f(mi)
}
// An abstraction of a Master detector which can be used to
// detect the leading master from a group.
type Master interface {
// Detect new master election. Every time a new master is elected, the
// detector will alert the observer. The first call to Detect is expected
// to kickstart any background detection processing (and not before then).
// If detection startup fails, or the listener cannot be added, then an
// error is returned.
Detect(MasterChanged) error
// returns a chan that, when closed, indicates the detector has terminated
Done() <-chan struct{}
// cancel the detector. it's ok to call this multiple times, or even if
// Detect() hasn't been invoked yet.
Cancel()
}
// functional option type for detectors
type Option func(interface{}) Option
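Because this file defines the entire listener surface, a short sketch may help. The loggingListener type below is hypothetical (it is not part of the removed code); it implements MasterChanged plus the optional AllMasters interface described above, and the master address is illustrative. As the comment notes, not every detector implementation delivers the AllMasters callback.

package main

import (
	"log"

	"github.com/mesos/mesos-go/detector"
	mesos "github.com/mesos/mesos-go/mesosproto"
)

// loggingListener implements MasterChanged plus the optional AllMasters interface.
type loggingListener struct{}

func (loggingListener) OnMasterChanged(mi *mesos.MasterInfo) {
	if mi == nil {
		log.Println("leading master lost")
		return
	}
	log.Printf("new leading master: %s (%s)", mi.GetId(), mi.GetHostname())
}

func (loggingListener) UpdatedMasters(all []*mesos.MasterInfo) {
	log.Printf("master membership changed: %d masters known", len(all))
}

func main() {
	m, err := detector.New("master@10.0.0.1:5050")
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Detect(loggingListener{}); err != nil {
		log.Fatal(err)
	}
	<-m.Done() // block until the detector is cancelled or its connection ends
}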


@@ -1,244 +0,0 @@
package detector
import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"strconv"
"sync"
"time"
log "github.com/golang/glog"
mesos "github.com/mesos/mesos-go/mesosproto"
"github.com/mesos/mesos-go/upid"
"golang.org/x/net/context"
)
const (
defaultMesosHttpClientTimeout = 10 * time.Second //TODO(jdef) configurable via flag?
defaultMesosLeaderSyncInterval = 30 * time.Second //TODO(jdef) configurable via flag?
defaultMesosMasterPort = 5050
)
// enables easier unit testing
type fetcherFunc func(ctx context.Context, address string) (*upid.UPID, error)
type Standalone struct {
ch chan *mesos.MasterInfo
client *http.Client
tr *http.Transport
pollOnce sync.Once
initial *mesos.MasterInfo
done chan struct{}
cancelOnce sync.Once
leaderSyncInterval time.Duration
httpClientTimeout time.Duration
assumedMasterPort int
poller func(pf fetcherFunc)
fetchPid fetcherFunc
}
// Create a new stand alone master detector.
func NewStandalone(mi *mesos.MasterInfo) *Standalone {
log.V(2).Infof("creating new standalone detector for %+v", mi)
stand := &Standalone{
ch: make(chan *mesos.MasterInfo),
tr: &http.Transport{},
initial: mi,
done: make(chan struct{}),
leaderSyncInterval: defaultMesosLeaderSyncInterval,
httpClientTimeout: defaultMesosHttpClientTimeout,
assumedMasterPort: defaultMesosMasterPort,
}
stand.poller = stand._poller
stand.fetchPid = stand._fetchPid
return stand
}
func (s *Standalone) String() string {
return fmt.Sprintf("{initial: %+v}", s.initial)
}
// Detecting the new master.
func (s *Standalone) Detect(o MasterChanged) error {
log.V(2).Info("Detect()")
s.pollOnce.Do(func() {
log.V(1).Info("spinning up asyc master detector poller")
// delayed initialization allows unit tests to modify timeouts before detection starts
s.client = &http.Client{
Transport: s.tr,
Timeout: s.httpClientTimeout,
}
go s.poller(s.fetchPid)
})
if o != nil {
log.V(1).Info("spawning asyc master detector listener")
go func() {
log.V(2).Infof("waiting for polled to send updates")
pollWaiter:
for {
select {
case mi, ok := <-s.ch:
if !ok {
break pollWaiter
}
log.V(1).Infof("detected master change: %+v", mi)
o.OnMasterChanged(mi)
case <-s.done:
return
}
}
o.OnMasterChanged(nil)
}()
} else {
log.Warningf("detect called with a nil master change listener")
}
return nil
}
func (s *Standalone) Done() <-chan struct{} {
return s.done
}
func (s *Standalone) Cancel() {
s.cancelOnce.Do(func() { close(s.done) })
}
// poll for changes to master leadership via current leader's /state endpoint.
// we poll the `initial` leader, aborting if none was specified.
//
// TODO(jdef) follow the leader: change who we poll based on the prior leader
// TODO(jdef) somehow determine all masters in cluster from the /state?
//
func (s *Standalone) _poller(pf fetcherFunc) {
defer func() {
defer s.Cancel()
log.Warning("shutting down standalone master detection")
}()
if s.initial == nil {
log.Errorf("aborting master poller since initial master info is nil")
return
}
addr := s.initial.GetHostname()
if len(addr) == 0 {
if s.initial.GetIp() == 0 {
log.Warningf("aborted mater poller since initial master info has no host")
return
}
ip := make([]byte, 4)
binary.BigEndian.PutUint32(ip, s.initial.GetIp())
addr = net.IP(ip).To4().String()
}
port := uint32(s.assumedMasterPort)
if s.initial.Port != nil && *s.initial.Port != 0 {
port = *s.initial.Port
}
addr = net.JoinHostPort(addr, strconv.Itoa(int(port)))
log.V(1).Infof("polling for master leadership at '%v'", addr)
var lastpid *upid.UPID
for {
startedAt := time.Now()
ctx, cancel := context.WithTimeout(context.Background(), s.leaderSyncInterval)
if pid, err := pf(ctx, addr); err == nil {
if !pid.Equal(lastpid) {
log.V(2).Infof("detected leadership change from '%v' to '%v'", lastpid, pid)
lastpid = pid
elapsed := time.Now().Sub(startedAt)
mi := CreateMasterInfo(pid)
select {
case s.ch <- mi: // noop
case <-time.After(s.leaderSyncInterval - elapsed):
// no one heard the master change, oh well - poll again
goto continuePolling
case <-s.done:
cancel()
return
}
} else {
log.V(2).Infof("no change to master leadership: '%v'", lastpid)
}
} else if err == context.DeadlineExceeded {
if lastpid != nil {
lastpid = nil
select {
case s.ch <- nil: // lost master
case <-s.done: // no need to cancel ctx
return
}
}
goto continuePolling
} else {
select {
case <-s.done:
cancel()
return
default:
if err != context.Canceled {
log.Error(err)
}
}
}
if remaining := s.leaderSyncInterval - time.Now().Sub(startedAt); remaining > 0 {
log.V(3).Infof("master leader poller sleeping for %v", remaining)
time.Sleep(remaining)
}
continuePolling:
cancel()
}
}
// assumes that address is in host:port format
func (s *Standalone) _fetchPid(ctx context.Context, address string) (*upid.UPID, error) {
//TODO(jdef) need SSL support
uri := fmt.Sprintf("http://%s/state", address)
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return nil, err
}
var pid *upid.UPID
err = s.httpDo(ctx, req, func(res *http.Response, err error) error {
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return fmt.Errorf("HTTP request failed with code %d: %v", res.StatusCode, res.Status)
}
blob, err1 := ioutil.ReadAll(res.Body)
if err1 != nil {
return err1
}
log.V(3).Infof("Got mesos state, content length %v", len(blob))
type State struct {
Leader string `json:"leader"` // ex: master(1)@10.22.211.18:5050
}
state := &State{}
err = json.Unmarshal(blob, state)
if err != nil {
return err
}
pid, err = upid.Parse(state.Leader)
return err
})
return pid, err
}
type responseHandler func(*http.Response, error) error
// hacked from https://blog.golang.org/context
func (s *Standalone) httpDo(ctx context.Context, req *http.Request, f responseHandler) error {
// Run the HTTP request in a goroutine and pass the response to f.
ch := make(chan error, 1)
go func() { ch <- f(s.client.Do(req)) }()
select {
case <-ctx.Done():
s.tr.CancelRequest(req)
<-ch // Wait for f to return.
return ctx.Err()
case err := <-ch:
return err
}
}
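To make the polling logic above concrete: the leader value read from /state parses into a UPID, and CreateMasterInfo (shown earlier in this diff) turns that into the MasterInfo delivered to listeners. Below is a minimal sketch using the example address from the State struct's comment; only APIs that appear in this diff are used, and the address itself is just the documented example.

package main

import (
	"fmt"

	"github.com/mesos/mesos-go/detector"
	"github.com/mesos/mesos-go/upid"
)

func main() {
	// The standalone poller reads the "leader" field of the master's /state
	// endpoint, e.g. "master(1)@10.22.211.18:5050" (the example from the comment above).
	pid, err := upid.Parse("master(1)@10.22.211.18:5050")
	if err != nil {
		panic(err)
	}
	// CreateMasterInfo resolves the host and packs it into a mesos.MasterInfo,
	// which is what the poller sends on its channel for each leadership change.
	mi := detector.CreateMasterInfo(pid)
	if mi == nil {
		panic("could not build MasterInfo from pid")
	}
	fmt.Println(mi.GetId(), mi.GetHostname(), mi.GetPort())
}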


@@ -1,40 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"client2.go",
"detect.go",
"doc.go",
"plugin.go",
"types.go",
],
tags = ["automanaged"],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/mesos/mesos-go/detector:go_default_library",
"//vendor/github.com/mesos/mesos-go/mesosproto:go_default_library",
"//vendor/github.com/samuel/go-zookeeper/zk:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,88 +0,0 @@
package zoo
import (
"sync"
"time"
"github.com/samuel/go-zookeeper/zk"
)
const (
defaultSessionTimeout = 60 * time.Second
CurrentPath = "."
)
var zkSessionTimeout = defaultSessionTimeout
type client2 struct {
*zk.Conn
path string
done chan struct{} // signal chan, closes when the underlying connection terminates
stopOnce sync.Once
}
func connect2(hosts []string, path string) (*client2, error) {
c, ev, err := zk.Connect(hosts, zkSessionTimeout)
if err != nil {
return nil, err
}
done := make(chan struct{})
go func() {
// close the 'done' chan when the zk event chan closes (signals termination of zk connection)
defer close(done)
for {
if _, ok := <-ev; !ok {
return
}
}
}()
return &client2{
Conn: c,
path: path,
done: done,
}, nil
}
func (c *client2) Stopped() <-chan struct{} {
return c.done
}
func (c *client2) Stop() {
c.stopOnce.Do(c.Close)
}
func (c *client2) Data(path string) (data []byte, err error) {
data, _, err = c.Get(path)
return
}
func (c *client2) WatchChildren(path string) (string, <-chan []string, <-chan error) {
errCh := make(chan error, 1)
snap := make(chan []string)
watchPath := c.path
if path != "" && path != CurrentPath {
watchPath = watchPath + path
}
go func() {
defer close(errCh)
for {
children, _, ev, err := c.ChildrenW(watchPath)
if err != nil {
errCh <- err
return
}
select {
case snap <- children:
case <-c.done:
return
}
e := <-ev // wait for the next watch-related event
if e.Err != nil {
errCh <- e.Err
return
}
}
}()
return watchPath, snap, errCh
}


@@ -1,396 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package zoo
import (
"encoding/json"
"fmt"
"math"
"net/url"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/detector"
mesos "github.com/mesos/mesos-go/mesosproto"
)
const (
// prefix for nodes listed at the ZK URL path
nodePrefix = "info_"
nodeJSONPrefix = "json.info_"
defaultMinDetectorCyclePeriod = 1 * time.Second
)
type (
ZKInterface interface {
Stopped() <-chan struct{}
Stop()
Data(string) ([]byte, error)
WatchChildren(string) (string, <-chan []string, <-chan error)
}
infoCodec func(path, node string) (*mesos.MasterInfo, error)
// Detector uses ZooKeeper to detect new leading master.
MasterDetector struct {
// detection should not signal master change listeners more frequently than this
cancel func()
client ZKInterface
done chan struct{}
// latch: only install, at most, one ignoreChanged listener; see MasterDetector.Detect
ignoreInstalled int32
leaderNode string
minDetectorCyclePeriod time.Duration
// guard against concurrent invocations of bootstrapFunc
bootstrapLock sync.RWMutex
bootstrapFunc func(ZKInterface, <-chan struct{}) (ZKInterface, error) // for one-time zk client initiation
}
)
// reasonable default for a noop change listener
var ignoreChanged = detector.OnMasterChanged(func(*mesos.MasterInfo) {})
// MinCyclePeriod is a functional option that determines the highest frequency of master change notifications
func MinCyclePeriod(d time.Duration) detector.Option {
return func(di interface{}) detector.Option {
md := di.(*MasterDetector)
old := md.minDetectorCyclePeriod
md.minDetectorCyclePeriod = d
return MinCyclePeriod(old)
}
}
func Bootstrap(f func(ZKInterface, <-chan struct{}) (ZKInterface, error)) detector.Option {
return func(di interface{}) detector.Option {
md := di.(*MasterDetector)
old := md.bootstrapFunc
md.bootstrapFunc = f
return Bootstrap(old)
}
}
// Internal constructor function
func NewMasterDetector(zkurls string, options ...detector.Option) (*MasterDetector, error) {
zkHosts, zkPath, err := parseZk(zkurls)
if err != nil {
log.Fatalln("Failed to parse url", err)
return nil, err
}
detector := &MasterDetector{
minDetectorCyclePeriod: defaultMinDetectorCyclePeriod,
done: make(chan struct{}),
cancel: func() {},
}
detector.bootstrapFunc = func(client ZKInterface, _ <-chan struct{}) (ZKInterface, error) {
if client == nil {
return connect2(zkHosts, zkPath)
}
return client, nil
}
// apply options last so that they can override default behavior
for _, opt := range options {
opt(detector)
}
log.V(2).Infoln("Created new detector to watch", zkHosts, zkPath)
return detector, nil
}
func parseZk(zkurls string) ([]string, string, error) {
u, err := url.Parse(zkurls)
if err != nil {
log.V(1).Infof("failed to parse url: %v", err)
return nil, "", err
}
if u.Scheme != "zk" {
return nil, "", fmt.Errorf("invalid url scheme for zk url: '%v'", u.Scheme)
}
return strings.Split(u.Host, ","), u.Path, nil
}
// returns a chan that, when closed, indicates termination of the detector
func (md *MasterDetector) Done() <-chan struct{} {
return md.done
}
func (md *MasterDetector) Cancel() {
md.bootstrapLock.RLock()
defer md.bootstrapLock.RUnlock()
md.cancel()
}
func (md *MasterDetector) childrenChanged(path string, list []string, obs detector.MasterChanged) {
md.notifyMasterChanged(path, list, obs)
md.notifyAllMasters(path, list, obs)
}
func (md *MasterDetector) notifyMasterChanged(path string, list []string, obs detector.MasterChanged) {
// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
topNode, codec := md.selectTopNode(list)
if md.leaderNode == topNode {
log.V(2).Infof("ignoring children-changed event, leader has not changed: %v", path)
return
}
log.V(2).Infof("changing leader node from %q -> %q", md.leaderNode, topNode)
md.leaderNode = topNode
var masterInfo *mesos.MasterInfo
if md.leaderNode != "" {
var err error
if masterInfo, err = codec(path, topNode); err != nil {
log.Errorln(err.Error())
}
}
log.V(2).Infof("detected master info: %+v", masterInfo)
logPanic(func() { obs.OnMasterChanged(masterInfo) })
}
// logPanic safely executes the given func, recovering from and logging a panic if one occurs.
func logPanic(f func()) {
defer func() {
if r := recover(); r != nil {
log.Errorf("recovered from client panic: %v", r)
}
}()
f()
}
func (md *MasterDetector) pullMasterInfo(path, node string) (*mesos.MasterInfo, error) {
data, err := md.client.Data(fmt.Sprintf("%s/%s", path, node))
if err != nil {
return nil, fmt.Errorf("failed to retrieve leader data: %v", err)
}
masterInfo := &mesos.MasterInfo{}
err = proto.Unmarshal(data, masterInfo)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal protobuf MasterInfo data from zookeeper: %v", err)
}
return masterInfo, nil
}
func (md *MasterDetector) pullMasterJsonInfo(path, node string) (*mesos.MasterInfo, error) {
data, err := md.client.Data(fmt.Sprintf("%s/%s", path, node))
if err != nil {
return nil, fmt.Errorf("failed to retrieve leader data: %v", err)
}
masterInfo := &mesos.MasterInfo{}
err = json.Unmarshal(data, masterInfo)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal json MasterInfo data from zookeeper: %v", err)
}
return masterInfo, nil
}
func (md *MasterDetector) notifyAllMasters(path string, list []string, obs detector.MasterChanged) {
all, ok := obs.(detector.AllMasters)
if !ok {
// not interested in entire master list
return
}
// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
masters := map[string]*mesos.MasterInfo{}
tryStore := func(node string, codec infoCodec) {
info, err := codec(path, node)
if err != nil {
log.Errorln(err.Error())
} else {
masters[info.GetId()] = info
}
}
for _, node := range list {
// compare https://github.com/apache/mesos/blob/0.23.0/src/master/detector.cpp#L437
if strings.HasPrefix(node, nodePrefix) {
tryStore(node, md.pullMasterInfo)
} else if strings.HasPrefix(node, nodeJSONPrefix) {
tryStore(node, md.pullMasterJsonInfo)
} else {
continue
}
}
masterList := make([]*mesos.MasterInfo, 0, len(masters))
for _, v := range masters {
masterList = append(masterList, v)
}
log.V(2).Infof("notifying of master membership change: %+v", masterList)
logPanic(func() { all.UpdatedMasters(masterList) })
}
func (md *MasterDetector) callBootstrap() (e error) {
log.V(2).Infoln("invoking detector boostrap")
md.bootstrapLock.Lock()
defer md.bootstrapLock.Unlock()
clientConfigured := md.client != nil
md.client, e = md.bootstrapFunc(md.client, md.done)
if e == nil && !clientConfigured && md.client != nil {
// chain the lifetime of this detector to that of the newly created client impl
client := md.client
md.cancel = client.Stop
go func() {
defer close(md.done)
<-client.Stopped()
}()
}
return
}
// the first call to Detect will kickstart a connection to zookeeper. a nil change listener may
// be spec'd, result of which is a detector that will still listen for master changes and record
// leadership changes internally but no listener would be notified. Detect may be called more than
// once, and each time the spec'd listener will be added to the list of those receiving notifications.
func (md *MasterDetector) Detect(f detector.MasterChanged) (err error) {
// kickstart zk client connectivity
if err := md.callBootstrap(); err != nil {
log.V(3).Infoln("failed to execute bootstrap function", err.Error())
return err
}
if f == nil {
// only ever install, at most, one ignoreChanged listener. multiple instances of it
// just consume resources and generate misleading log messages.
if !atomic.CompareAndSwapInt32(&md.ignoreInstalled, 0, 1) {
log.V(3).Infoln("ignoreChanged listener already installed")
return
}
f = ignoreChanged
}
log.V(3).Infoln("spawning detect()")
go md.detect(f)
return nil
}
func (md *MasterDetector) detect(f detector.MasterChanged) {
log.V(3).Infoln("detecting children at", CurrentPath)
detectLoop:
for {
select {
case <-md.Done():
return
default:
}
log.V(3).Infoln("watching children at", CurrentPath)
path, childrenCh, errCh := md.client.WatchChildren(CurrentPath)
rewatch := false
for {
started := time.Now()
select {
case children := <-childrenCh:
md.childrenChanged(path, children, f)
case err, ok := <-errCh:
// check for a tie first (required for predictability (tests)); the downside of
// doing this is that a listener might get two callbacks back-to-back ("new leader",
// followed by "no leader").
select {
case children := <-childrenCh:
md.childrenChanged(path, children, f)
default:
}
if ok {
log.V(1).Infoln("child watch ended with error, master lost; error was:", err.Error())
} else {
// detector shutdown likely...
log.V(1).Infoln("child watch ended, master lost")
}
select {
case <-md.Done():
return
default:
if md.leaderNode != "" {
log.V(2).Infof("changing leader node from %q -> \"\"", md.leaderNode)
md.leaderNode = ""
f.OnMasterChanged(nil)
}
}
rewatch = true
}
// rate-limit master changes
if elapsed := time.Now().Sub(started); elapsed > 0 {
log.V(2).Infoln("resting before next detection cycle")
select {
case <-md.Done():
return
case <-time.After(md.minDetectorCyclePeriod - elapsed): // noop
}
}
if rewatch {
continue detectLoop
}
}
}
}
func (md *MasterDetector) selectTopNode(list []string) (topNode string, codec infoCodec) {
// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
topNode = selectTopNodePrefix(list, nodeJSONPrefix)
codec = md.pullMasterJsonInfo
if topNode == "" {
topNode = selectTopNodePrefix(list, nodePrefix)
codec = md.pullMasterInfo
if topNode != "" {
log.Warningf("Leading master is using a Protobuf binary format when registering "+
"with Zookeeper (%s): this will be deprecated as of Mesos 0.24 (see MESOS-2340).",
topNode)
}
}
return
}
func selectTopNodePrefix(list []string, pre string) (node string) {
var leaderSeq uint64 = math.MaxUint64
for _, v := range list {
if !strings.HasPrefix(v, pre) {
continue // only care about participants
}
seqStr := strings.TrimPrefix(v, pre)
seq, err := strconv.ParseUint(seqStr, 10, 64)
if err != nil {
log.Warningf("unexpected zk node format '%s': %v", seqStr, err)
continue
}
if seq < leaderSeq {
leaderSeq = seq
node = v
}
}
if node == "" {
log.V(3).Infoln("No top node found.")
} else {
log.V(3).Infof("Top node selected: '%s'", node)
}
return node
}


@@ -1,3 +0,0 @@
// Zookeeper-based mesos-master leadership detection.
// Implements support for optional detector.AllMasters interface.
package zoo


@@ -1,11 +0,0 @@
package zoo
import (
"github.com/mesos/mesos-go/detector"
)
func init() {
detector.Register("zk://", detector.PluginFactory(func(spec string, options ...detector.Option) (detector.Master, error) {
return NewMasterDetector(spec, options...)
}))
}
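This registration shim is what wires the "zk://" prefix into the detector package: importing the zoo package for its side effects runs init(), which registers the factory above. A hypothetical consumer, with made-up ZooKeeper hosts and the conventional /mesos znode path, would look roughly like this.

package main

import (
	"log"

	"github.com/mesos/mesos-go/detector"
	// Blank import for side effects only: init() above registers the "zk://"
	// prefix so detector.New can dispatch to the ZooKeeper-based detector.
	_ "github.com/mesos/mesos-go/detector/zoo"
)

func main() {
	// Comma-separated ZooKeeper hosts plus the znode path the Mesos masters register under.
	m, err := detector.New("zk://zk1:2181,zk2:2181,zk3:2181/mesos")
	if err != nil {
		log.Fatalf("failed to create ZooKeeper master detector: %v", err)
	}
	defer m.Cancel()
	// Passing a nil listener still kick-starts detection; the detector then just
	// tracks leadership changes internally without notifying anyone.
	if err := m.Detect(nil); err != nil {
		log.Fatal(err)
	}
}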


@@ -1,27 +0,0 @@
package zoo
import (
"github.com/samuel/go-zookeeper/zk"
)
// Connector Interface to facade zk.Conn type
// since github.com/samuel/go-zookeeper/zk does not provide an interface
// for the zk.Conn object, this allows for mocking and easier testing.
type Connector interface {
Close()
Children(string) ([]string, *zk.Stat, error)
ChildrenW(string) ([]string, *zk.Stat, <-chan zk.Event, error)
Get(string) ([]byte, *zk.Stat, error)
}
//Factory is an adapter to trap the creation of zk.Conn instances
//since the official zk API does not expose an interface for zk.Conn.
type Factory interface {
create() (Connector, <-chan zk.Event, error)
}
type asFactory func() (Connector, <-chan zk.Event, error)
func (f asFactory) create() (Connector, <-chan zk.Event, error) {
return f()
}


@@ -1,33 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authentication.pb.go",
"internal.pb.go",
"mesos.pb.go",
"messages.pb.go",
],
tags = ["automanaged"],
deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,9 +0,0 @@
PROTO_PATH := ${GOPATH}/src:.
PROTO_PATH := ${PROTO_PATH}:${GOPATH}/src/github.com/gogo/protobuf/protobuf
PROTO_PATH := ${PROTO_PATH}:${GOPATH}/src/github.com/gogo/protobuf/gogoproto
.PHONY: all
all:
protoc --proto_path=${PROTO_PATH} --gogo_out=. *.proto
protoc --proto_path=${PROTO_PATH} --gogo_out=. ./scheduler/*.proto

File diff suppressed because it is too large

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mesosproto;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
message AuthenticateMessage {
required string pid = 1; // PID that needs to be authenticated.
}
message AuthenticationMechanismsMessage {
repeated string mechanisms = 1; // List of available SASL mechanisms.
}
message AuthenticationStartMessage {
required string mechanism = 1;
optional bytes data = 2;
}
message AuthenticationStepMessage {
required bytes data = 1;
}
message AuthenticationCompletedMessage {}
message AuthenticationFailedMessage {}
message AuthenticationErrorMessage {
optional string error = 1;
}

View File

@ -1,103 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: internal.proto
// DO NOT EDIT!
package mesosproto
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// For use with detector callbacks
type InternalMasterChangeDetected struct {
// will be present if there's a new master, otherwise nil
Master *MasterInfo `protobuf:"bytes,1,opt,name=master" json:"master,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *InternalMasterChangeDetected) Reset() { *m = InternalMasterChangeDetected{} }
func (m *InternalMasterChangeDetected) String() string { return proto.CompactTextString(m) }
func (*InternalMasterChangeDetected) ProtoMessage() {}
func (m *InternalMasterChangeDetected) GetMaster() *MasterInfo {
if m != nil {
return m.Master
}
return nil
}
type InternalTryAuthentication struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *InternalTryAuthentication) Reset() { *m = InternalTryAuthentication{} }
func (m *InternalTryAuthentication) String() string { return proto.CompactTextString(m) }
func (*InternalTryAuthentication) ProtoMessage() {}
type InternalAuthenticationResult struct {
// true only if the authentication process completed and login was successful
Success *bool `protobuf:"varint,1,req,name=success" json:"success,omitempty"`
// true if the authentication process completed, successfully or not
Completed *bool `protobuf:"varint,2,req,name=completed" json:"completed,omitempty"`
// master pid that this result pertains to
Pid *string `protobuf:"bytes,3,req,name=pid" json:"pid,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *InternalAuthenticationResult) Reset() { *m = InternalAuthenticationResult{} }
func (m *InternalAuthenticationResult) String() string { return proto.CompactTextString(m) }
func (*InternalAuthenticationResult) ProtoMessage() {}
func (m *InternalAuthenticationResult) GetSuccess() bool {
if m != nil && m.Success != nil {
return *m.Success
}
return false
}
func (m *InternalAuthenticationResult) GetCompleted() bool {
if m != nil && m.Completed != nil {
return *m.Completed
}
return false
}
func (m *InternalAuthenticationResult) GetPid() string {
if m != nil && m.Pid != nil {
return *m.Pid
}
return ""
}
type InternalNetworkError struct {
// master pid that this event pertains to
Pid *string `protobuf:"bytes,1,req,name=pid" json:"pid,omitempty"`
// driver session UUID
Session *string `protobuf:"bytes,2,opt,name=session" json:"session,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *InternalNetworkError) Reset() { *m = InternalNetworkError{} }
func (m *InternalNetworkError) String() string { return proto.CompactTextString(m) }
func (*InternalNetworkError) ProtoMessage() {}
func (m *InternalNetworkError) GetPid() string {
if m != nil && m.Pid != nil {
return *m.Pid
}
return ""
}
func (m *InternalNetworkError) GetSession() string {
if m != nil && m.Session != nil {
return *m.Session
}
return ""
}

View File

@ -1,30 +0,0 @@
package mesosproto;
import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
// For use with detector callbacks
message InternalMasterChangeDetected {
// will be present if there's a new master, otherwise nil
optional MasterInfo master = 1;
}
message InternalTryAuthentication {
// empty message, serves as a signal to the scheduler bindings
}
message InternalAuthenticationResult {
// true only if the authentication process completed and login was successful
required bool success = 1;
// true if the authentication process completed, successfully or not
required bool completed = 2;
// master pid that this result pertains to
required string pid = 3;
}
message InternalNetworkError {
// master pid that this event pertains to
required string pid = 1;
// driver session UUID
optional string session = 2;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,705 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mesosproto;
import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
// TODO(benh): Consider splitting these messages into different "packages"
// which represent which messages get handled by which components (e.g., the
// "mesos.executor" package includes messages that the executor handles).
// TODO(bmahler): Add executor_uuid here, and send it to the master. This will
// allow us to expose executor work directories for tasks in the webui when
// looking from the master level. Currently only the slave knows which run the
// task belongs to.
/**
* Describes a task, similar to `TaskInfo`.
*
* `Task` is used in some of the Mesos messages found below.
* `Task` is used instead of `TaskInfo` if:
* 1) we need additional IDs, such as a specific
* framework, executor, or agent; or
* 2) we do not need the additional data, such as the command run by the
* task, the specific containerization, or health checks. These
* additional fields may be large and unnecessary for some Mesos messages.
*
* `Task` is generally constructed from a `TaskInfo`. See protobuf::createTask.
*/
message Task {
required string name = 1;
required TaskID task_id = 2;
required FrameworkID framework_id = 3;
optional ExecutorID executor_id = 4;
required SlaveID slave_id = 5;
required TaskState state = 6; // Latest state of the task.
repeated Resource resources = 7;
repeated TaskStatus statuses = 8;
// These fields correspond to the state and uuid of the latest
// status update forwarded to the master.
// NOTE: Either both the fields must be set or both must be unset.
optional TaskState status_update_state = 9;
optional bytes status_update_uuid = 10;
optional Labels labels = 11;
// Service discovery information for the task. It is not interpreted
// or acted upon by Mesos. It is up to a service discovery system
// to use this information as needed and to handle tasks without
// service discovery information.
optional DiscoveryInfo discovery = 12;
}
// TODO(vinod): Create a new UUID message type.
/**
* Describes a task's status.
*
* `StatusUpdate` is used in some of the Mesos messages found below.
* The master and agent use `StatusUpdate` to wrap a `TaskStatus` when
* passing it from the agent to the framework that spawned the task.
*
* See protobuf::createStatusUpdate.
*/
message StatusUpdate {
required FrameworkID framework_id = 1;
optional ExecutorID executor_id = 2;
optional SlaveID slave_id = 3;
// Since 0.23.0 we set 'status.uuid' in the executor
// driver for all retryable status updates.
required TaskStatus status = 4;
required double timestamp = 5;
// Since 0.26.0 this is deprecated in favor of 'status.uuid'.
optional bytes uuid = 6;
// This corresponds to the latest state of the task according to the
 // agent. Note that this state might be different from the state in
 // 'status' because the status update manager queues updates. In other
 // words, 'status' corresponds to the update at the top of the queue and
 // 'latest_state' corresponds to the update at the bottom of the queue.
optional TaskState latest_state = 7;
}
/**
* Encapsulates how we checkpoint a `StatusUpdate` to disk.
*
* See the StatusUpdateManager and slave/state.cpp.
*/
message StatusUpdateRecord {
enum Type {
UPDATE = 0;
ACK = 1;
}
required Type type = 1;
// Required if type == UPDATE.
optional StatusUpdate update = 2;
// Required if type == ACK.
optional bytes uuid = 3;
}
// TODO(josephw): Check if this can be removed. This appears to be
// for backwards compatibility with very early versions of Mesos.
message SubmitSchedulerRequest
{
required string name = 1;
}
// TODO(josephw): Remove for the same reason as `SubmitSchedulerRequest`.
message SubmitSchedulerResponse
{
required bool okay = 1;
}
/**
* Sends a free-form message from the executor to the framework.
* Mesos forwards the message, if necessary, via the agents and the master.
*
* See scheduler::Event::Message.
*/
message ExecutorToFrameworkMessage {
required SlaveID slave_id = 1;
required FrameworkID framework_id = 2;
required ExecutorID executor_id = 3;
required bytes data = 4;
}
/**
* Sends a free-form message from the framework to the executor.
* Mesos forwards the message, if necessary, via the agents and the master.
*
* See scheduler::Call::Message.
*/
message FrameworkToExecutorMessage {
required SlaveID slave_id = 1;
required FrameworkID framework_id = 2;
required ExecutorID executor_id = 3;
required bytes data = 4;
}
/**
* Subscribes the framework with the master to receive events.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Subscribe.
*/
message RegisterFrameworkMessage {
required FrameworkInfo framework = 1;
}
/**
* Subscribes the framework with the master to receive events.
* This is used when the framework has previously registered and
* the master changes to a newly elected master.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Subscribe.
*/
message ReregisterFrameworkMessage {
required FrameworkInfo framework = 2;
required bool failover = 3;
}
/**
* Notifies the framework that the master has registered it.
* The `framework_id` holds a unique ID for distinguishing this framework.
*
* See scheduler::Event::Subscribed.
*/
message FrameworkRegisteredMessage {
required FrameworkID framework_id = 1;
required MasterInfo master_info = 2;
}
/**
* Notifies the framework that the master has reregistered it.
* This message is used in the same conditions as `ReregisterFrameworkMessage`.
*
* See scheduler::Event::Subscribed.
*/
message FrameworkReregisteredMessage {
required FrameworkID framework_id = 1;
required MasterInfo master_info = 2;
}
/**
* Stops the framework and shuts down all its tasks and executors.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Teardown.
*/
message UnregisterFrameworkMessage {
required FrameworkID framework_id = 1;
}
/**
* Aborts the scheduler driver and prevents further callbacks to the driver.
*
* Used exclusively by the pre-Event/Call Mesos scheduler driver.
*/
message DeactivateFrameworkMessage {
required FrameworkID framework_id = 1;
}
/**
* Requests specific resources from Mesos's allocator.
* If the allocator supports resource requests, any corresponding
* resources will be sent like a normal resource offer.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Request.
*/
message ResourceRequestMessage {
required FrameworkID framework_id = 1;
repeated Request requests = 2;
}
/**
* Sends resources offers to the scheduler.
*
* See scheduler::Event::Offers.
*/
message ResourceOffersMessage {
repeated Offer offers = 1;
repeated string pids = 2;
// The `inverse_offers` field is added here because we currently use it in
// `master.cpp` when constructing the message to send to schedulers. We use
// the original version of the proto API until we do a full refactor of all
// the messages being sent.
// It is not fully implemented in the old scheduler; only the V1 scheduler
// currently implements inverse offers.
repeated InverseOffer inverse_offers = 3;
}
/**
* Launches tasks using resources from the specified offers.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Accept and scheduler::Call::Decline.
*/
message LaunchTasksMessage {
required FrameworkID framework_id = 1;
repeated TaskInfo tasks = 3;
required Filters filters = 5;
repeated OfferID offer_ids = 6;
}
/**
 * Notifies the scheduler that a particular offer is no longer valid.
*
* See scheduler::Event::Rescind.
*/
message RescindResourceOfferMessage {
required OfferID offer_id = 1;
}
/**
* Removes all filters previously set by the scheduler.
*
* Used by the pre-Event/Call Mesos scheduler driver.
* See scheduler::Call::Revive.
*/
message ReviveOffersMessage {
required FrameworkID framework_id = 1;
}
/**
* Depending on the `TaskInfo`, this message either notifies an existing
* executor to run the task, or starts a new executor and runs the task.
* This message is sent when scheduler::Call::Accept is sent with
* Offer::Operation::Launch.
*
* See executor::Event::Launch.
*/
message RunTaskMessage {
// TODO(karya): Remove framework_id after MESOS-2559 has shipped.
optional FrameworkID framework_id = 1 [deprecated = true];
required FrameworkInfo framework = 2;
required TaskInfo task = 4;
// The pid of the framework. This was moved to 'optional' in
// 0.24.0 to support schedulers using the HTTP API. For now, we
// continue to always set pid since it was required in 0.23.x.
// When 'pid' is unset, or set to empty string, the agent will
// forward executor messages through the master. For schedulers
// still using the driver, this will remain set.
optional string pid = 3;
}
/**
* Kills a specific task.
*
* See scheduler::Call::Kill and executor::Event::Kill.
*/
message KillTaskMessage {
// TODO(bmahler): Include the SlaveID here to improve the Master's
// ability to respond for non-activated agents.
required FrameworkID framework_id = 1;
required TaskID task_id = 2;
}
/**
* Sends a task status update to the scheduler.
*
* See scheduler::Event::Update.
*/
message StatusUpdateMessage {
required StatusUpdate update = 1;
// If present, scheduler driver automatically sends an acknowledgement
// to the `pid`. This only applies to the pre-Event/Call Mesos
// scheduler driver.
optional string pid = 2;
}
/**
* This message is used by the scheduler to acknowledge the receipt of a status
* update. Mesos forwards the acknowledgement to the executor running the task.
*
* See scheduler::Call::Acknowledge and executor::Event::Acknowledged.
*/
message StatusUpdateAcknowledgementMessage {
required SlaveID slave_id = 1;
required FrameworkID framework_id = 2;
required TaskID task_id = 3;
required bytes uuid = 4;
}
/**
* Notifies the scheduler that the agent was lost.
*
* See scheduler::Event::Failure.
*/
message LostSlaveMessage {
required SlaveID slave_id = 1;
}
/**
* Allows the scheduler to query the status for non-terminal tasks.
* This causes the master to send back the latest task status for
* each task in `statuses`, if possible. Tasks that are no longer
* known will result in a `TASK_LOST` update. If `statuses` is empty,
* then the master will send the latest status for each task
* currently known.
*/
message ReconcileTasksMessage {
required FrameworkID framework_id = 1;
repeated TaskStatus statuses = 2; // Should be non-terminal only.
}
/**
* Notifies the framework about errors during registration.
*
* See scheduler::Event::Error.
*/
message FrameworkErrorMessage {
required string message = 2;
}
/**
* Registers the agent with the master.
*
* If registration fails, a `ShutdownMessage` is sent to the agent.
* Failure conditions are documented inline in Master::registerSlave.
*/
message RegisterSlaveMessage {
required SlaveInfo slave = 1;
// Resources that are checkpointed by the agent (e.g., persistent
// volume or dynamic reservation). Frameworks need to release
// checkpointed resources explicitly.
repeated Resource checkpointed_resources = 3;
// NOTE: This is a hack for the master to detect the agent's
// version. If unset the agent is < 0.21.0.
// TODO(bmahler): Do proper versioning: MESOS-986.
optional string version = 2;
}
/**
* Registers the agent with the master.
* This is used when the agent has previously registered and
* the master changes to a newly elected master.
*
* If registration fails, a `ShutdownMessage` is sent to the agent.
* Failure conditions are documented inline in Master::reregisterSlave.
*/
message ReregisterSlaveMessage {
required SlaveInfo slave = 2;
// Resources that are checkpointed by the agent (e.g., persistent
// volume or dynamic reservation). Frameworks need to release
// checkpointed resources explicitly.
repeated Resource checkpointed_resources = 7;
repeated ExecutorInfo executor_infos = 4;
repeated Task tasks = 3;
repeated Archive.Framework completed_frameworks = 5;
// NOTE: This is a hack for the master to detect the agent's
// version. If unset the agent is < 0.21.0.
// TODO(bmahler): Do proper versioning: MESOS-986.
optional string version = 6;
}
/**
* Notifies the agent that the master has registered it.
* The `slave_id` holds a unique ID for distinguishing this agent.
*/
message SlaveRegisteredMessage {
required SlaveID slave_id = 1;
optional MasterSlaveConnection connection = 2;
}
/**
* Notifies the agent that the master has reregistered it.
* This message is used in the same conditions as `ReregisterSlaveMessage`.
*/
message SlaveReregisteredMessage {
required SlaveID slave_id = 1;
// Contains a list of non-terminal tasks that the master believes to
// be running on the agent. The agent should respond `TASK_LOST` to
// any tasks that are unknown, so the master knows to remove those.
repeated ReconcileTasksMessage reconciliations = 2;
optional MasterSlaveConnection connection = 3;
}
/**
* This message is sent by the agent to the master during agent shutdown.
* The master updates its state to reflect the removed agent.
*/
message UnregisterSlaveMessage {
required SlaveID slave_id = 1;
}
/**
* Describes the connection between the master and agent.
*/
message MasterSlaveConnection {
// Product of max_slave_ping_timeouts * slave_ping_timeout.
// If no pings are received within the total timeout,
// the master will remove the agent.
optional double total_ping_timeout_seconds = 1;
}
/**
* This message is periodically sent by the master to the agent.
* If the agent is connected to the master, "connected" is true.
*/
message PingSlaveMessage {
required bool connected = 1;
}
/**
* This message is sent by the agent to the master in response to the
* `PingSlaveMessage`.
*/
message PongSlaveMessage {}
/**
* Tells an agent to shut down all executors of the given framework.
*/
message ShutdownFrameworkMessage {
required FrameworkID framework_id = 1;
}
/**
* Tells an agent (and consequently the executor) to shutdown an executor.
*/
message ShutdownExecutorMessage {
// TODO(vinod): Make these fields required. These are made optional
// for backwards compatibility between 0.23.0 agent and pre 0.23.0
// executor driver.
optional ExecutorID executor_id = 1;
optional FrameworkID framework_id = 2;
}
/**
* Broadcasts updated framework information from master to all agents.
*/
message UpdateFrameworkMessage {
required FrameworkID framework_id = 1;
// See the comment on RunTaskMessage.pid.
optional string pid = 2;
}
/**
* This message is sent to the agent whenever there is an update of
* the resources that need to be checkpointed (e.g., persistent volume
* or dynamic reservation).
*/
message CheckpointResourcesMessage {
repeated Resource resources = 1;
}
/**
* This message is sent by the agent to the master to inform the
* master about the total amount of oversubscribed (allocated and
* allocatable) resources.
*/
message UpdateSlaveMessage {
required SlaveID slave_id = 1;
repeated Resource oversubscribed_resources = 2;
}
/**
* Subscribes the executor with the agent to receive events.
*
* See executor::Call::Subscribe.
*/
message RegisterExecutorMessage {
required FrameworkID framework_id = 1;
required ExecutorID executor_id = 2;
}
/**
* Notifies the executor that the agent has registered it.
*
* See executor::Event::Subscribed.
*/
message ExecutorRegisteredMessage {
required ExecutorInfo executor_info = 2;
required FrameworkID framework_id = 3;
required FrameworkInfo framework_info = 4;
required SlaveID slave_id = 5;
required SlaveInfo slave_info = 6;
}
/**
* Notifies the executor that the agent has reregistered it.
*
* See executor::Event::Subscribed.
*/
message ExecutorReregisteredMessage {
required SlaveID slave_id = 1;
required SlaveInfo slave_info = 2;
}
/**
* Notifies the scheduler about terminated executors.
*
* See scheduler::Event::Failure.
*/
message ExitedExecutorMessage {
required SlaveID slave_id = 1;
required FrameworkID framework_id = 2;
required ExecutorID executor_id = 3;
required int32 status = 4;
}
/**
* Reestablishes the connection between executor and agent after agent failover.
* This message originates from the agent.
*/
message ReconnectExecutorMessage {
required SlaveID slave_id = 1;
}
/**
* Subscribes the executor with the agent to receive events.
* This is used after a disconnection. The executor must include
* any unacknowledged tasks or updates.
*
* See executor::Call::Subscribe.
*/
message ReregisterExecutorMessage {
required ExecutorID executor_id = 1;
required FrameworkID framework_id = 2;
repeated TaskInfo tasks = 3;
repeated StatusUpdate updates = 4;
}
/**
* Sends a free-form message from the master to an agent.
* The agent should gracefully terminate in response, which includes
* shutting down all executors and tasks on the agent.
*/
message ShutdownMessage {
optional string message = 1;
}
// TODO(adam-mesos): Move this to an 'archive' package.
/**
* Describes Completed Frameworks, etc. for archival.
*/
message Archive {
message Framework {
required FrameworkInfo framework_info = 1;
optional string pid = 2;
repeated Task tasks = 3;
}
repeated Framework frameworks = 1;
}
/**
* Message describing task current health status that is sent by
* the task health checker to the command executor.
 * The command executor reports the task status back on each receive.
 * If the health checker's configured failure condition is met, the
 * `kill_task` flag is set to true and the executor kills the task
 * when it receives this message.
*/
message TaskHealthStatus {
required TaskID task_id = 1;
required bool healthy = 2;
// Flag to initiate task kill.
optional bool kill_task = 3 [default = false];
// Number of consecutive counts in current status.
// This will not be populated if task is healthy.
optional int32 consecutive_failures = 4;
}
/**
* Message to signal completion of an event within a module.
*/
message HookExecuted {
optional string module = 1;
}

View File

@ -1,36 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"mesosprotoutil.go",
"node.go",
],
tags = ["automanaged"],
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/mesos/mesos-go/mesosproto:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,6 +0,0 @@
package mesosutil
const (
// MesosVersion indicates the supported mesos version.
MesosVersion = "0.24.0"
)

View File

@ -1,220 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mesosutil
import (
"github.com/gogo/protobuf/proto"
mesos "github.com/mesos/mesos-go/mesosproto"
)
func NewValueRange(begin, end uint64) *mesos.Value_Range {
return &mesos.Value_Range{Begin: proto.Uint64(begin), End: proto.Uint64(end)}
}
func FilterResources(resources []*mesos.Resource, filter func(*mesos.Resource) bool) (result []*mesos.Resource) {
for _, resource := range resources {
if filter(resource) {
result = append(result, resource)
}
}
return result
}
func AddResourceReservation(resource *mesos.Resource, principal string, role string) *mesos.Resource {
resource.Reservation = &mesos.Resource_ReservationInfo{Principal: proto.String(principal)}
resource.Role = proto.String(role)
return resource
}
func NewScalarResourceWithReservation(name string, value float64, principal string, role string) *mesos.Resource {
return AddResourceReservation(NewScalarResource(name, value), principal, role)
}
func NewRangesResourceWithReservation(name string, ranges []*mesos.Value_Range, principal string, role string) *mesos.Resource {
return AddResourceReservation(NewRangesResource(name, ranges), principal, role)
}
func NewSetResourceWithReservation(name string, items []string, principal string, role string) *mesos.Resource {
return AddResourceReservation(NewSetResource(name, items), principal, role)
}
func NewVolumeResourceWithReservation(val float64, containerPath string, persistenceId string, mode *mesos.Volume_Mode, principal string, role string) *mesos.Resource {
return AddResourceReservation(NewVolumeResource(val, containerPath, persistenceId, mode), principal, role)
}
func NewScalarResource(name string, val float64) *mesos.Resource {
return &mesos.Resource{
Name: proto.String(name),
Type: mesos.Value_SCALAR.Enum(),
Scalar: &mesos.Value_Scalar{Value: proto.Float64(val)},
}
}
func NewRangesResource(name string, ranges []*mesos.Value_Range) *mesos.Resource {
return &mesos.Resource{
Name: proto.String(name),
Type: mesos.Value_RANGES.Enum(),
Ranges: &mesos.Value_Ranges{Range: ranges},
}
}
func NewSetResource(name string, items []string) *mesos.Resource {
return &mesos.Resource{
Name: proto.String(name),
Type: mesos.Value_SET.Enum(),
Set: &mesos.Value_Set{Item: items},
}
}
func NewVolumeResource(val float64, containerPath string, persistenceId string, mode *mesos.Volume_Mode) *mesos.Resource {
resource := NewScalarResource("disk", val)
resource.Disk = &mesos.Resource_DiskInfo{
Persistence: &mesos.Resource_DiskInfo_Persistence{Id: proto.String(persistenceId)},
Volume: &mesos.Volume{ContainerPath: proto.String(containerPath), Mode: mode},
}
return resource
}
func NewFrameworkID(id string) *mesos.FrameworkID {
return &mesos.FrameworkID{Value: proto.String(id)}
}
func NewFrameworkInfo(user, name string, frameworkId *mesos.FrameworkID) *mesos.FrameworkInfo {
return &mesos.FrameworkInfo{
User: proto.String(user),
Name: proto.String(name),
Id: frameworkId,
}
}
func NewMasterInfo(id string, ip, port uint32) *mesos.MasterInfo {
return &mesos.MasterInfo{
Id: proto.String(id),
Ip: proto.Uint32(ip),
Port: proto.Uint32(port),
}
}
func NewOfferID(id string) *mesos.OfferID {
return &mesos.OfferID{Value: proto.String(id)}
}
func NewOffer(offerId *mesos.OfferID, frameworkId *mesos.FrameworkID, slaveId *mesos.SlaveID, hostname string) *mesos.Offer {
return &mesos.Offer{
Id: offerId,
FrameworkId: frameworkId,
SlaveId: slaveId,
Hostname: proto.String(hostname),
}
}
func FilterOffersResources(offers []*mesos.Offer, filter func(*mesos.Resource) bool) (result []*mesos.Resource) {
for _, offer := range offers {
result = FilterResources(offer.Resources, filter)
}
return result
}
func NewSlaveID(id string) *mesos.SlaveID {
return &mesos.SlaveID{Value: proto.String(id)}
}
func NewTaskID(id string) *mesos.TaskID {
return &mesos.TaskID{Value: proto.String(id)}
}
func NewTaskInfo(
name string,
taskId *mesos.TaskID,
slaveId *mesos.SlaveID,
resources []*mesos.Resource,
) *mesos.TaskInfo {
return &mesos.TaskInfo{
Name: proto.String(name),
TaskId: taskId,
SlaveId: slaveId,
Resources: resources,
}
}
func NewTaskStatus(taskId *mesos.TaskID, state mesos.TaskState) *mesos.TaskStatus {
return &mesos.TaskStatus{
TaskId: taskId,
State: mesos.TaskState(state).Enum(),
}
}
func NewStatusUpdate(frameworkId *mesos.FrameworkID, taskStatus *mesos.TaskStatus, timestamp float64, uuid []byte) *mesos.StatusUpdate {
return &mesos.StatusUpdate{
FrameworkId: frameworkId,
Status: taskStatus,
Timestamp: proto.Float64(timestamp),
Uuid: uuid,
}
}
func NewCommandInfo(command string) *mesos.CommandInfo {
return &mesos.CommandInfo{Value: proto.String(command)}
}
func NewExecutorID(id string) *mesos.ExecutorID {
return &mesos.ExecutorID{Value: proto.String(id)}
}
func NewExecutorInfo(execId *mesos.ExecutorID, command *mesos.CommandInfo) *mesos.ExecutorInfo {
return &mesos.ExecutorInfo{
ExecutorId: execId,
Command: command,
}
}
func NewCreateOperation(volumes []*mesos.Resource) *mesos.Offer_Operation {
return &mesos.Offer_Operation{
Type: mesos.Offer_Operation_CREATE.Enum(),
Create: &mesos.Offer_Operation_Create{Volumes: volumes},
}
}
func NewDestroyOperation(volumes []*mesos.Resource) *mesos.Offer_Operation {
return &mesos.Offer_Operation{
Type: mesos.Offer_Operation_DESTROY.Enum(),
Destroy: &mesos.Offer_Operation_Destroy{Volumes: volumes},
}
}
func NewReserveOperation(resources []*mesos.Resource) *mesos.Offer_Operation {
return &mesos.Offer_Operation{
Type: mesos.Offer_Operation_RESERVE.Enum(),
Reserve: &mesos.Offer_Operation_Reserve{Resources: resources},
}
}
func NewUnreserveOperation(resources []*mesos.Resource) *mesos.Offer_Operation {
return &mesos.Offer_Operation{
Type: mesos.Offer_Operation_UNRESERVE.Enum(),
Unreserve: &mesos.Offer_Operation_Unreserve{Resources: resources},
}
}
func NewLaunchOperation(tasks []*mesos.TaskInfo) *mesos.Offer_Operation {
return &mesos.Offer_Operation{
Type: mesos.Offer_Operation_LAUNCH.Enum(),
Launch: &mesos.Offer_Operation_Launch{TaskInfos: tasks},
}
}
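
The helpers in this file compose the protobuf values a framework hands to the scheduler driver. A hedged usage sketch (the IDs, names, and port range are illustrative; the Command field follows the generated TaskInfo bindings):
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"
)

func main() {
	// Resources requested for the task.
	cpus := mesosutil.NewScalarResource("cpus", 0.5)
	mem := mesosutil.NewScalarResource("mem", 128)
	ports := mesosutil.NewRangesResource("ports", []*mesos.Value_Range{
		mesosutil.NewValueRange(31000, 31001),
	})

	// Task descriptor bound to an (illustrative) agent ID.
	task := mesosutil.NewTaskInfo(
		"example-task",
		mesosutil.NewTaskID("example-task-1"),
		mesosutil.NewSlaveID("20170710-000000-0-0000-S0"),
		[]*mesos.Resource{cpus, mem, ports},
	)
	task.Command = mesosutil.NewCommandInfo("echo hello")

	// Wrap the task in a LAUNCH operation, as it would be sent with an accept call.
	launch := mesosutil.NewLaunchOperation([]*mesos.TaskInfo{task})
	fmt.Println(launch.GetType(), "->", task.GetName())
}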

View File

@ -1,29 +0,0 @@
package mesosutil
import (
"os"
"os/exec"
"strings"
log "github.com/golang/glog"
)
//TODO(jdef) copied from kubernetes/pkg/util/node.go
func GetHostname(hostnameOverride string) string {
hostname := hostnameOverride
if hostname == "" {
// Note: We use exec here instead of os.Hostname() because we
// want the FQDN, and this is the easiest way to get it.
fqdn, err := exec.Command("hostname", "-f").Output()
if err != nil || len(fqdn) == 0 {
log.Errorf("Couldn't determine hostname fqdn, failing back to hostname: %v", err)
hostname, err = os.Hostname()
if err != nil {
log.Fatalf("Error getting hostname: %v", err)
}
} else {
hostname = string(fqdn)
}
}
return strings.TrimSpace(hostname)
}
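
A short usage sketch of the helper above: with an empty override it shells out to `hostname -f` and falls back to os.Hostname(); a non-empty override is returned as-is after trimming.
package main

import (
	"fmt"

	"github.com/mesos/mesos-go/mesosutil"
)

func main() {
	fmt.Println(mesosutil.GetHostname(""))                   // detect FQDN, fall back to os.Hostname()
	fmt.Println(mesosutil.GetHostname("node-1.example.com")) // override wins, whitespace trimmed
}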

View File

@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"upid.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,4 +0,0 @@
/*
Package upid defines the UPID type and some utilities of the UPID.
*/
package upid

View File

@ -1,63 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package upid
import (
"fmt"
"net"
"strings"
)
// UPID is an equivalent of the UPID in libprocess.
type UPID struct {
ID string
Host string
Port string
}
// Parse parses the UPID from the input string.
func Parse(input string) (*UPID, error) {
upid := new(UPID)
splits := strings.Split(input, "@")
if len(splits) != 2 {
return nil, fmt.Errorf("Expect one `@' in the input")
}
upid.ID = splits[0]
if _, err := net.ResolveTCPAddr("tcp4", splits[1]); err != nil {
return nil, err
}
upid.Host, upid.Port, _ = net.SplitHostPort(splits[1])
return upid, nil
}
// String returns the string representation.
func (u UPID) String() string {
return fmt.Sprintf("%s@%s:%s", u.ID, u.Host, u.Port)
}
// Equal returns true if the two UPIDs are equal.
func (u *UPID) Equal(upid *UPID) bool {
if u == nil {
return upid == nil
} else {
return upid != nil && u.ID == upid.ID && u.Host == upid.Host && u.Port == upid.Port
}
}
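
A short usage sketch of the API above (the address is illustrative):
package main

import (
	"fmt"

	"github.com/mesos/mesos-go/upid"
)

func main() {
	pid, err := upid.Parse("master@127.0.0.1:5050")
	if err != nil {
		panic(err)
	}
	fmt.Println(pid.ID, pid.Host, pid.Port) // master 127.0.0.1 5050
	fmt.Println(pid.String())               // master@127.0.0.1:5050

	other, _ := upid.Parse("master@127.0.0.1:5050")
	fmt.Println(pid.Equal(other)) // true
}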

View File

@ -1,25 +0,0 @@
Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,37 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"conn.go",
"constants.go",
"flw.go",
"lock.go",
"server_help.go",
"server_java.go",
"structs.go",
"tracer.go",
"util.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,868 +0,0 @@
// Package zk is a native Go client library for the ZooKeeper orchestration service.
package zk
/*
TODO:
* make sure a ping response comes back in a reasonable time
Possible watcher events:
* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
*/
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// ErrNoServer indicates that an operation cannot be completed
// because attempts to connect to all servers in the list failed.
var ErrNoServer = errors.New("zk: could not connect to a server")
// ErrInvalidPath indicates that an operation was being attempted on
// an invalid path. (e.g. empty path)
var ErrInvalidPath = errors.New("zk: invalid path")
// DefaultLogger uses the stdlib log package for logging.
var DefaultLogger Logger = defaultLogger{}
const (
bufferSize = 1536 * 1024
eventChanSize = 6
sendChanSize = 16
protectedPrefix = "_c_"
)
type watchType int
const (
watchTypeData = iota
watchTypeExist = iota
watchTypeChild = iota
)
type watchPathType struct {
path string
wType watchType
}
type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)
// Logger is an interface that can be implemented to provide custom log output.
type Logger interface {
Printf(string, ...interface{})
}
type Conn struct {
lastZxid int64
sessionID int64
state State // must be 32-bit aligned
xid uint32
timeout int32 // session timeout in milliseconds
passwd []byte
dialer Dialer
servers []string
serverIndex int // remember last server that was tried during connect to round-robin attempts to servers
lastServerIndex int // index of the last server that was successfully connected to and authenticated with
conn net.Conn
eventChan chan Event
shouldQuit chan struct{}
pingInterval time.Duration
recvTimeout time.Duration
connectTimeout time.Duration
sendChan chan *request
requests map[int32]*request // Xid -> pending request
requestsLock sync.Mutex
watchers map[watchPathType][]chan Event
watchersLock sync.Mutex
// Debug (used by unit tests)
reconnectDelay time.Duration
logger Logger
}
type request struct {
xid int32
opcode int32
pkt interface{}
recvStruct interface{}
recvChan chan response
// Because sending and receiving happen in separate go routines, there's
// a possible race condition when creating watches from outside the read
// loop. We must ensure that a watcher gets added to the list synchronously
// with the response from the server on any request that creates a watch.
// In order to not hard code the watch logic for each opcode in the recv
	// loop, the caller can use recvFunc to insert some synchronous code
// after a response.
recvFunc func(*request, *responseHeader, error)
}
type response struct {
zxid int64
err error
}
type Event struct {
Type EventType
State State
Path string // For non-session events, the path of the watched node.
Err error
Server string // For connection events
}
// Connect establishes a new connection to a pool of zookeeper servers
// using the default net.Dialer. See ConnectWithDialer for further
// information about session timeout.
func Connect(servers []string, sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
return ConnectWithDialer(servers, sessionTimeout, nil)
}
// ConnectWithDialer establishes a new connection to a pool of zookeeper
// servers. The provided session timeout sets the amount of time for which
// a session is considered valid after losing connection to a server. Within
// the session timeout it's possible to reestablish a connection to a different
// server and keep the same session. This means any ephemeral nodes and
// watches are maintained.
func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
if len(servers) == 0 {
return nil, nil, errors.New("zk: server list must not be empty")
}
recvTimeout := sessionTimeout * 2 / 3
srvs := make([]string, len(servers))
for i, addr := range servers {
if strings.Contains(addr, ":") {
srvs[i] = addr
} else {
srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
}
}
// Randomize the order of the servers to avoid creating hotspots
stringShuffle(srvs)
ec := make(chan Event, eventChanSize)
if dialer == nil {
dialer = net.DialTimeout
}
conn := Conn{
dialer: dialer,
servers: srvs,
serverIndex: 0,
lastServerIndex: -1,
conn: nil,
state: StateDisconnected,
eventChan: ec,
shouldQuit: make(chan struct{}),
recvTimeout: recvTimeout,
pingInterval: recvTimeout / 2,
connectTimeout: 1 * time.Second,
sendChan: make(chan *request, sendChanSize),
requests: make(map[int32]*request),
watchers: make(map[watchPathType][]chan Event),
passwd: emptyPassword,
timeout: int32(sessionTimeout.Nanoseconds() / 1e6),
logger: DefaultLogger,
// Debug
reconnectDelay: 0,
}
go func() {
conn.loop()
conn.flushRequests(ErrClosing)
conn.invalidateWatches(ErrClosing)
close(conn.eventChan)
}()
return &conn, ec, nil
}
func (c *Conn) Close() {
close(c.shouldQuit)
select {
case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
case <-time.After(time.Second):
}
}
// State returns the current state of the connection.
func (c *Conn) State() State {
return State(atomic.LoadInt32((*int32)(&c.state)))
}
// SetLogger sets the logger to be used for printing errors.
// Logger is an interface provided by this package.
func (c *Conn) SetLogger(l Logger) {
c.logger = l
}
func (c *Conn) setState(state State) {
atomic.StoreInt32((*int32)(&c.state), int32(state))
select {
case c.eventChan <- Event{Type: EventSession, State: state, Server: c.servers[c.serverIndex]}:
default:
// panic("zk: event channel full - it must be monitored and never allowed to be full")
}
}
func (c *Conn) connect() error {
c.setState(StateConnecting)
for {
c.serverIndex = (c.serverIndex + 1) % len(c.servers)
if c.serverIndex == c.lastServerIndex {
c.flushUnsentRequests(ErrNoServer)
select {
case <-time.After(time.Second):
// pass
case <-c.shouldQuit:
c.setState(StateDisconnected)
c.flushUnsentRequests(ErrClosing)
return ErrClosing
}
} else if c.lastServerIndex < 0 {
// lastServerIndex defaults to -1 to avoid a delay on the initial connect
c.lastServerIndex = 0
}
zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout)
if err == nil {
c.conn = zkConn
c.setState(StateConnected)
return nil
}
c.logger.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err)
}
}
func (c *Conn) loop() {
for {
if err := c.connect(); err != nil {
// c.Close() was called
return
}
err := c.authenticate()
switch {
case err == ErrSessionExpired:
c.invalidateWatches(err)
case err != nil && c.conn != nil:
c.conn.Close()
case err == nil:
c.lastServerIndex = c.serverIndex
closeChan := make(chan struct{}) // channel to tell send loop stop
var wg sync.WaitGroup
wg.Add(1)
go func() {
c.sendLoop(c.conn, closeChan)
c.conn.Close() // causes recv loop to EOF/exit
wg.Done()
}()
wg.Add(1)
go func() {
err = c.recvLoop(c.conn)
if err == nil {
panic("zk: recvLoop should never return nil error")
}
close(closeChan) // tell send loop to exit
wg.Done()
}()
wg.Wait()
}
c.setState(StateDisconnected)
// Yeesh
if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") {
c.logger.Printf(err.Error())
}
select {
case <-c.shouldQuit:
c.flushRequests(ErrClosing)
return
default:
}
if err != ErrSessionExpired {
err = ErrConnectionClosed
}
c.flushRequests(err)
if c.reconnectDelay > 0 {
select {
case <-c.shouldQuit:
return
case <-time.After(c.reconnectDelay):
}
}
}
}
func (c *Conn) flushUnsentRequests(err error) {
for {
select {
default:
return
case req := <-c.sendChan:
req.recvChan <- response{-1, err}
}
}
}
// Send error to all pending requests and clear request map
func (c *Conn) flushRequests(err error) {
c.requestsLock.Lock()
for _, req := range c.requests {
req.recvChan <- response{-1, err}
}
c.requests = make(map[int32]*request)
c.requestsLock.Unlock()
}
// Send error to all watchers and clear watchers map
func (c *Conn) invalidateWatches(err error) {
c.watchersLock.Lock()
defer c.watchersLock.Unlock()
	if len(c.watchers) > 0 {
for pathType, watchers := range c.watchers {
ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
for _, ch := range watchers {
ch <- ev
close(ch)
}
}
c.watchers = make(map[watchPathType][]chan Event)
}
}
func (c *Conn) sendSetWatches() {
c.watchersLock.Lock()
defer c.watchersLock.Unlock()
if len(c.watchers) == 0 {
return
}
req := &setWatchesRequest{
RelativeZxid: c.lastZxid,
DataWatches: make([]string, 0),
ExistWatches: make([]string, 0),
ChildWatches: make([]string, 0),
}
n := 0
for pathType, watchers := range c.watchers {
if len(watchers) == 0 {
continue
}
switch pathType.wType {
case watchTypeData:
req.DataWatches = append(req.DataWatches, pathType.path)
case watchTypeExist:
req.ExistWatches = append(req.ExistWatches, pathType.path)
case watchTypeChild:
req.ChildWatches = append(req.ChildWatches, pathType.path)
}
n++
}
if n == 0 {
return
}
go func() {
res := &setWatchesResponse{}
_, err := c.request(opSetWatches, req, res, nil)
if err != nil {
c.logger.Printf("Failed to set previous watches: %s", err.Error())
}
}()
}
func (c *Conn) authenticate() error {
buf := make([]byte, 256)
// connect request
n, err := encodePacket(buf[4:], &connectRequest{
ProtocolVersion: protocolVersion,
LastZxidSeen: c.lastZxid,
TimeOut: c.timeout,
SessionID: c.sessionID,
Passwd: c.passwd,
})
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[:4], uint32(n))
c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = c.conn.Write(buf[:n+4])
c.conn.SetWriteDeadline(time.Time{})
if err != nil {
return err
}
c.sendSetWatches()
// connect response
// package length
c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
_, err = io.ReadFull(c.conn, buf[:4])
c.conn.SetReadDeadline(time.Time{})
if err != nil {
		// Sometimes ZooKeeper just drops the connection on invalid session data;
		// we prefer to drop the session and start from scratch when that happens
		// instead of falling into a loop of connect/disconnect attempts.
c.sessionID = 0
c.passwd = emptyPassword
c.lastZxid = 0
c.setState(StateExpired)
return ErrSessionExpired
}
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
buf = make([]byte, blen)
}
_, err = io.ReadFull(c.conn, buf[:blen])
if err != nil {
return err
}
r := connectResponse{}
_, err = decodePacket(buf[:blen], &r)
if err != nil {
return err
}
if r.SessionID == 0 {
c.sessionID = 0
c.passwd = emptyPassword
c.lastZxid = 0
c.setState(StateExpired)
return ErrSessionExpired
}
c.timeout = r.TimeOut
c.sessionID = r.SessionID
c.passwd = r.Passwd
c.setState(StateHasSession)
return nil
}
func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error {
pingTicker := time.NewTicker(c.pingInterval)
defer pingTicker.Stop()
buf := make([]byte, bufferSize)
for {
select {
case req := <-c.sendChan:
header := &requestHeader{req.xid, req.opcode}
n, err := encodePacket(buf[4:], header)
if err != nil {
req.recvChan <- response{-1, err}
continue
}
n2, err := encodePacket(buf[4+n:], req.pkt)
if err != nil {
req.recvChan <- response{-1, err}
continue
}
n += n2
binary.BigEndian.PutUint32(buf[:4], uint32(n))
c.requestsLock.Lock()
select {
case <-closeChan:
req.recvChan <- response{-1, ErrConnectionClosed}
c.requestsLock.Unlock()
return ErrConnectionClosed
default:
}
c.requests[req.xid] = req
c.requestsLock.Unlock()
conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = conn.Write(buf[:n+4])
conn.SetWriteDeadline(time.Time{})
if err != nil {
req.recvChan <- response{-1, err}
conn.Close()
return err
}
case <-pingTicker.C:
n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
if err != nil {
panic("zk: opPing should never fail to serialize")
}
binary.BigEndian.PutUint32(buf[:4], uint32(n))
conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
_, err = conn.Write(buf[:n+4])
conn.SetWriteDeadline(time.Time{})
if err != nil {
conn.Close()
return err
}
case <-closeChan:
return nil
}
}
}
func (c *Conn) recvLoop(conn net.Conn) error {
buf := make([]byte, bufferSize)
for {
// package length
conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
_, err := io.ReadFull(conn, buf[:4])
if err != nil {
return err
}
blen := int(binary.BigEndian.Uint32(buf[:4]))
if cap(buf) < blen {
buf = make([]byte, blen)
}
_, err = io.ReadFull(conn, buf[:blen])
conn.SetReadDeadline(time.Time{})
if err != nil {
return err
}
res := responseHeader{}
_, err = decodePacket(buf[:16], &res)
if err != nil {
return err
}
if res.Xid == -1 {
res := &watcherEvent{}
_, err := decodePacket(buf[16:16+blen], res)
if err != nil {
return err
}
ev := Event{
Type: res.Type,
State: res.State,
Path: res.Path,
Err: nil,
}
select {
case c.eventChan <- ev:
default:
}
wTypes := make([]watchType, 0, 2)
switch res.Type {
case EventNodeCreated:
wTypes = append(wTypes, watchTypeExist)
case EventNodeDeleted, EventNodeDataChanged:
wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
case EventNodeChildrenChanged:
wTypes = append(wTypes, watchTypeChild)
}
c.watchersLock.Lock()
for _, t := range wTypes {
wpt := watchPathType{res.Path, t}
if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
for _, ch := range watchers {
ch <- ev
close(ch)
}
delete(c.watchers, wpt)
}
}
c.watchersLock.Unlock()
} else if res.Xid == -2 {
// Ping response. Ignore.
} else if res.Xid < 0 {
c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
} else {
if res.Zxid > 0 {
c.lastZxid = res.Zxid
}
c.requestsLock.Lock()
req, ok := c.requests[res.Xid]
if ok {
delete(c.requests, res.Xid)
}
c.requestsLock.Unlock()
if !ok {
c.logger.Printf("Response for unknown request with xid %d", res.Xid)
} else {
if res.Err != 0 {
err = res.Err.toError()
} else {
_, err = decodePacket(buf[16:16+blen], req.recvStruct)
}
if req.recvFunc != nil {
req.recvFunc(req, &res, err)
}
req.recvChan <- response{res.Zxid, err}
if req.opcode == opClose {
return io.EOF
}
}
}
}
}
func (c *Conn) nextXid() int32 {
return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
}
func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
c.watchersLock.Lock()
defer c.watchersLock.Unlock()
ch := make(chan Event, 1)
wpt := watchPathType{path, watchType}
c.watchers[wpt] = append(c.watchers[wpt], ch)
return ch
}
func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
rq := &request{
xid: c.nextXid(),
opcode: opcode,
pkt: req,
recvStruct: res,
recvChan: make(chan response, 1),
recvFunc: recvFunc,
}
c.sendChan <- rq
return rq.recvChan
}
func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
r := <-c.queueRequest(opcode, req, res, recvFunc)
return r.zxid, r.err
}
func (c *Conn) AddAuth(scheme string, auth []byte) error {
_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
return err
}
func (c *Conn) Children(path string) ([]string, *Stat, error) {
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
return res.Children, &res.Stat, err
}
func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
var ech <-chan Event
res := &getChildren2Response{}
_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
if err == nil {
ech = c.addWatcher(path, watchTypeChild)
}
})
if err != nil {
return nil, nil, nil, err
}
return res.Children, &res.Stat, ech, err
}
func (c *Conn) Get(path string) ([]byte, *Stat, error) {
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
return res.Data, &res.Stat, err
}
// GetW returns the contents of a znode and sets a watch
func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
var ech <-chan Event
res := &getDataResponse{}
_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
if err == nil {
ech = c.addWatcher(path, watchTypeData)
}
})
if err != nil {
return nil, nil, nil, err
}
return res.Data, &res.Stat, ech, err
}
func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
if path == "" {
return nil, ErrInvalidPath
}
res := &setDataResponse{}
_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
return &res.Stat, err
}
func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
res := &createResponse{}
_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
return res.Path, err
}
// CreateProtectedEphemeralSequential fixes a race condition if the server crashes
// after it creates the node. On reconnect the session may still be valid so the
// ephemeral node still exists. Therefore, on reconnect we need to check if a node
// with a GUID generated on create exists.
func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
var guid [16]byte
_, err := io.ReadFull(rand.Reader, guid[:16])
if err != nil {
return "", err
}
guidStr := fmt.Sprintf("%x", guid)
parts := strings.Split(path, "/")
parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
rootPath := strings.Join(parts[:len(parts)-1], "/")
protectedPath := strings.Join(parts, "/")
var newPath string
for i := 0; i < 3; i++ {
newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
switch err {
case ErrSessionExpired:
// No need to search for the node since it can't exist. Just try again.
case ErrConnectionClosed:
children, _, err := c.Children(rootPath)
if err != nil {
return "", err
}
for _, p := range children {
parts := strings.Split(p, "/")
if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) {
if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
return rootPath + "/" + p, nil
}
}
}
case nil:
return newPath, nil
default:
return "", err
}
}
return "", err
}
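
A hedged sketch of the typical use of CreateProtectedEphemeralSequential, leader election or work-queue entries, where losing track of a node you just created would be costly. WorldACL and PermAll come from parts of this package outside this excerpt, and the server address and path are illustrative:
package main

import (
	"fmt"
	"time"

	"github.com/samuel/go-zookeeper/zk"
)

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 10*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The GUID baked into the node name lets the client rediscover its own
	// node after a connection loss, as described in the comment above.
	path, err := conn.CreateProtectedEphemeralSequential(
		"/election/candidate-", []byte("node-a"), zk.WorldACL(zk.PermAll))
	if err != nil {
		panic(err)
	}
	fmt.Println("created", path) // e.g. /election/_c_<guid>-candidate-0000000007
}
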
func (c *Conn) Delete(path string, version int32) error {
_, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
return err
}
func (c *Conn) Exists(path string) (bool, *Stat, error) {
res := &existsResponse{}
_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
exists := true
if err == ErrNoNode {
exists = false
err = nil
}
return exists, &res.Stat, err
}
func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
var ech <-chan Event
res := &existsResponse{}
_, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
if err == nil {
ech = c.addWatcher(path, watchTypeData)
} else if err == ErrNoNode {
ech = c.addWatcher(path, watchTypeExist)
}
})
exists := true
if err == ErrNoNode {
exists = false
err = nil
}
if err != nil {
return false, nil, nil, err
}
return exists, &res.Stat, ech, err
}
func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
res := &getAclResponse{}
_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
return res.Acl, &res.Stat, err
}
func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
res := &setAclResponse{}
_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
return &res.Stat, err
}
func (c *Conn) Sync(path string) (string, error) {
res := &syncResponse{}
_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
return res.Path, err
}
type MultiResponse struct {
Stat *Stat
String string
}
// Multi executes multiple ZooKeeper operations or none of them. The provided
// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or
// *CheckVersionRequest.
func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
req := &multiRequest{
Ops: make([]multiRequestOp, 0, len(ops)),
DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
}
for _, op := range ops {
var opCode int32
switch op.(type) {
case *CreateRequest:
opCode = opCreate
case *SetDataRequest:
opCode = opSetData
case *DeleteRequest:
opCode = opDelete
case *CheckVersionRequest:
opCode = opCheck
default:
return nil, fmt.Errorf("uknown operation type %T", op)
}
req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op})
}
res := &multiResponse{}
_, err := c.request(opMulti, req, res, nil)
mr := make([]MultiResponse, len(res.Ops))
for i, op := range res.Ops {
mr[i] = MultiResponse{Stat: op.Stat, String: op.String}
}
return mr, err
}
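// A minimal usage sketch of an atomic multi op, assuming example paths under
// "/jobs"; either both operations are applied or neither is.
func exampleMulti(c *Conn) error {
    ops := []interface{}{
        &CreateRequest{Path: "/jobs/job-1", Data: []byte("pending"), Acl: WorldACL(PermAll)},
        &SetDataRequest{Path: "/jobs/count", Data: []byte("1"), Version: -1},
    }
    _, err := c.Multi(ops...)
    return err
}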

View File

@ -1,240 +0,0 @@
package zk
import (
"errors"
)
const (
protocolVersion = 0
DefaultPort = 2181
)
const (
opNotify = 0
opCreate = 1
opDelete = 2
opExists = 3
opGetData = 4
opSetData = 5
opGetAcl = 6
opSetAcl = 7
opGetChildren = 8
opSync = 9
opPing = 11
opGetChildren2 = 12
opCheck = 13
opMulti = 14
opClose = -11
opSetAuth = 100
opSetWatches = 101
// Not in protocol, used internally
opWatcherEvent = -2
)
const (
EventNodeCreated = EventType(1)
EventNodeDeleted = EventType(2)
EventNodeDataChanged = EventType(3)
EventNodeChildrenChanged = EventType(4)
EventSession = EventType(-1)
EventNotWatching = EventType(-2)
)
var (
eventNames = map[EventType]string{
EventNodeCreated: "EventNodeCreated",
EventNodeDeleted: "EventNodeDeleted",
EventNodeDataChanged: "EventNodeDataChanged",
EventNodeChildrenChanged: "EventNodeChildrenChanged",
EventSession: "EventSession",
EventNotWatching: "EventNotWatching",
}
)
const (
StateUnknown = State(-1)
StateDisconnected = State(0)
StateConnecting = State(1)
StateAuthFailed = State(4)
StateConnectedReadOnly = State(5)
StateSaslAuthenticated = State(6)
StateExpired = State(-112)
// StateAuthFailed = State(-113)
StateConnected = State(100)
StateHasSession = State(101)
)
const (
FlagEphemeral = 1
FlagSequence = 2
)
var (
stateNames = map[State]string{
StateUnknown: "StateUnknown",
StateDisconnected: "StateDisconnected",
StateConnectedReadOnly: "StateConnectedReadOnly",
StateSaslAuthenticated: "StateSaslAuthenticated",
StateExpired: "StateExpired",
StateAuthFailed: "StateAuthFailed",
StateConnecting: "StateConnecting",
StateConnected: "StateConnected",
StateHasSession: "StateHasSession",
}
)
type State int32
func (s State) String() string {
if name := stateNames[s]; name != "" {
return name
}
return "Unknown"
}
type ErrCode int32
var (
ErrConnectionClosed = errors.New("zk: connection closed")
ErrUnknown = errors.New("zk: unknown error")
ErrAPIError = errors.New("zk: api error")
ErrNoNode = errors.New("zk: node does not exist")
ErrNoAuth = errors.New("zk: not authenticated")
ErrBadVersion = errors.New("zk: version conflict")
ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
ErrNodeExists = errors.New("zk: node already exists")
ErrNotEmpty = errors.New("zk: node has children")
ErrSessionExpired = errors.New("zk: session has been expired by the server")
ErrInvalidACL = errors.New("zk: invalid ACL specified")
ErrAuthFailed = errors.New("zk: client authentication failed")
ErrClosing = errors.New("zk: zookeeper is closing")
ErrNothing = errors.New("zk: no server responses to process")
ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored")
// ErrInvalidCallback = errors.New("zk: invalid callback specified")
errCodeToError = map[ErrCode]error{
0: nil,
errAPIError: ErrAPIError,
errNoNode: ErrNoNode,
errNoAuth: ErrNoAuth,
errBadVersion: ErrBadVersion,
errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
errNodeExists: ErrNodeExists,
errNotEmpty: ErrNotEmpty,
errSessionExpired: ErrSessionExpired,
// errInvalidCallback: ErrInvalidCallback,
errInvalidAcl: ErrInvalidACL,
errAuthFailed: ErrAuthFailed,
errClosing: ErrClosing,
errNothing: ErrNothing,
errSessionMoved: ErrSessionMoved,
}
)
func (e ErrCode) toError() error {
if err, ok := errCodeToError[e]; ok {
return err
}
return ErrUnknown
}
const (
errOk = 0
// System and server-side errors
errSystemError = -1
errRuntimeInconsistency = -2
errDataInconsistency = -3
errConnectionLoss = -4
errMarshallingError = -5
errUnimplemented = -6
errOperationTimeout = -7
errBadArguments = -8
errInvalidState = -9
// API errors
errAPIError = ErrCode(-100)
errNoNode = ErrCode(-101) // *
errNoAuth = ErrCode(-102)
errBadVersion = ErrCode(-103) // *
errNoChildrenForEphemerals = ErrCode(-108)
errNodeExists = ErrCode(-110) // *
errNotEmpty = ErrCode(-111)
errSessionExpired = ErrCode(-112)
errInvalidCallback = ErrCode(-113)
errInvalidAcl = ErrCode(-114)
errAuthFailed = ErrCode(-115)
errClosing = ErrCode(-116)
errNothing = ErrCode(-117)
errSessionMoved = ErrCode(-118)
)
// Constants for ACL permissions
const (
PermRead = 1 << iota
PermWrite
PermCreate
PermDelete
PermAdmin
PermAll = 0x1f
)
var (
emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
opNames = map[int32]string{
opNotify: "notify",
opCreate: "create",
opDelete: "delete",
opExists: "exists",
opGetData: "getData",
opSetData: "setData",
opGetAcl: "getACL",
opSetAcl: "setACL",
opGetChildren: "getChildren",
opSync: "sync",
opPing: "ping",
opGetChildren2: "getChildren2",
opCheck: "check",
opMulti: "multi",
opClose: "close",
opSetAuth: "setAuth",
opSetWatches: "setWatches",
opWatcherEvent: "watcherEvent",
}
)
type EventType int32
func (t EventType) String() string {
if name := eventNames[t]; name != "" {
return name
}
return "Unknown"
}
// Mode is used to build custom server modes (leader|follower|standalone).
type Mode uint8
func (m Mode) String() string {
if name := modeNames[m]; name != "" {
return name
}
return "unknown"
}
const (
ModeUnknown Mode = iota
ModeLeader Mode = iota
ModeFollower Mode = iota
ModeStandalone Mode = iota
)
var (
modeNames = map[Mode]string{
ModeLeader: "leader",
ModeFollower: "follower",
ModeStandalone: "standalone",
}
)

View File

@ -1,290 +0,0 @@
package zk
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"math/big"
"net"
"regexp"
"strconv"
"time"
)
// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output
// from the zookeeper instances and parses the output. A slice of *ServerStats structs is returned
// as well as a boolean value to indicate whether this function processed successfully.
//
// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil,
// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the
// servers had an issue and the "Error" value in the struct should be inspected to determine
// which server had the issue.
func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// different parts of the regular expression that are required to parse the srvr output
var (
zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
)
// build the regex from the pieces above
re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState))
if err != nil {
return nil, false
}
imOk := true
servers = FormatServers(servers)
ss := make([]*ServerStats, len(servers))
for i := range ss {
response, err := fourLetterWord(servers[i], "srvr", timeout)
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
matches := re.FindAllStringSubmatch(string(response), -1)
if matches == nil {
err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
match := matches[0][1:]
// determine current server
var srvrMode Mode
switch match[10] {
case "leader":
srvrMode = ModeLeader
case "follower":
srvrMode = ModeFollower
case "standalone":
srvrMode = ModeStandalone
default:
srvrMode = ModeUnknown
}
buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1])
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
parsedInt, err := strconv.ParseInt(match[9], 0, 64)
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
// the ZxID value is an int64 with two int32s packed inside
// the high int32 is the epoch (i.e., number of leader elections)
// the low int32 is the counter
epoch := int32(parsedInt >> 32)
counter := int32(parsedInt & 0xFFFFFFFF)
// within the regex above, these values must be numerical
// so we can avoid useless checking of the error return value
minLatency, _ := strconv.ParseInt(match[2], 0, 64)
avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
recv, _ := strconv.ParseInt(match[5], 0, 64)
sent, _ := strconv.ParseInt(match[6], 0, 64)
cons, _ := strconv.ParseInt(match[7], 0, 64)
outs, _ := strconv.ParseInt(match[8], 0, 64)
ncnt, _ := strconv.ParseInt(match[11], 0, 64)
ss[i] = &ServerStats{
Sent: sent,
Received: recv,
NodeCount: ncnt,
MinLatency: minLatency,
AvgLatency: avgLatency,
MaxLatency: maxLatency,
Connections: cons,
Outstanding: outs,
Epoch: epoch,
Counter: counter,
BuildTime: buildTime,
Mode: srvrMode,
Version: match[0],
}
}
return ss, imOk
}
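// A minimal usage sketch for polling an ensemble with FLWSrvr; the addresses
// and timeout are assumptions for the example only.
func exampleFLWSrvr() {
    stats, ok := FLWSrvr([]string{"127.0.0.1:2181", "127.0.0.1:2182"}, 2*time.Second)
    if !ok {
        fmt.Println("at least one server could not be queried")
    }
    for _, s := range stats {
        if s.Error != nil {
            fmt.Println("error:", s.Error)
            continue
        }
        fmt.Printf("%s %s, %d znodes\n", s.Version, s.Mode, s.NodeCount)
    }
}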
// FLWRuok is a FourLetterWord helper function. In particular, this function
// pulls the ruok output from each server.
func FLWRuok(servers []string, timeout time.Duration) []bool {
servers = FormatServers(servers)
oks := make([]bool, len(servers))
for i := range oks {
response, err := fourLetterWord(servers[i], "ruok", timeout)
if err != nil {
continue
}
if bytes.Equal(response[:4], []byte("imok")) {
oks[i] = true
}
}
return oks
}
// FLWCons is a FourLetterWord helper function. In particular, this function
// pulls the cons output from each server.
//
// As with FLWSrvr, the boolean value indicates whether one of the requests had
// an issue. The Clients struct has an Error value that can be checked.
func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) {
var (
zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]`
zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),`
zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)`
)
re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh))
if err != nil {
return nil, false
}
servers = FormatServers(servers)
sc := make([]*ServerClients, len(servers))
imOk := true
for i := range sc {
response, err := fourLetterWord(servers[i], "cons", timeout)
if err != nil {
sc[i] = &ServerClients{Error: err}
imOk = false
continue
}
scan := bufio.NewScanner(bytes.NewReader(response))
var clients []*ServerClient
for scan.Scan() {
line := scan.Bytes()
if len(line) == 0 {
continue
}
m := re.FindAllStringSubmatch(string(line), -1)
if m == nil {
err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
sc[i] = &ServerClients{Error: err}
imOk = false
continue
}
match := m[0][1:]
queued, _ := strconv.ParseInt(match[1], 0, 64)
recvd, _ := strconv.ParseInt(match[2], 0, 64)
sent, _ := strconv.ParseInt(match[3], 0, 64)
sid, _ := strconv.ParseInt(match[4], 0, 64)
est, _ := strconv.ParseInt(match[6], 0, 64)
timeout, _ := strconv.ParseInt(match[7], 0, 32)
lresp, _ := strconv.ParseInt(match[10], 0, 64)
llat, _ := strconv.ParseInt(match[11], 0, 32)
minlat, _ := strconv.ParseInt(match[12], 0, 32)
avglat, _ := strconv.ParseInt(match[13], 0, 32)
maxlat, _ := strconv.ParseInt(match[14], 0, 32)
// zookeeper returns a value, '0xffffffffffffffff', as the
// Lzxid for PING requests in the 'cons' output.
// unfortunately, in Go that is an invalid int64 and is not represented
// as -1.
// However, converting the string value to a big.Int and then back to
// an int64 properly sets the value to -1
lzxid, ok := new(big.Int).SetString(match[9], 0)
var errVal error
if !ok {
errVal = fmt.Errorf("failed to convert lzxid value to big.Int")
imOk = false
}
lcxid, ok := new(big.Int).SetString(match[8], 0)
if !ok && errVal == nil {
errVal = fmt.Errorf("failed to convert lcxid value to big.Int")
imOk = false
}
clients = append(clients, &ServerClient{
Queued: queued,
Received: recvd,
Sent: sent,
SessionID: sid,
Lcxid: lcxid.Int64(),
Lzxid: lzxid.Int64(),
Timeout: int32(timeout),
LastLatency: int32(llat),
MinLatency: int32(minlat),
AvgLatency: int32(avglat),
MaxLatency: int32(maxlat),
Established: time.Unix(est, 0),
LastResponse: time.Unix(lresp, 0),
Addr: match[0],
LastOperation: match[5],
Error: errVal,
})
}
sc[i] = &ServerClients{Clients: clients}
}
return sc, imOk
}
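// A minimal usage sketch for dumping per-session client info with FLWCons;
// the address and timeout are assumptions for the example only.
func exampleFLWCons() {
    servers, ok := FLWCons([]string{"127.0.0.1:2181"}, 2*time.Second)
    if !ok {
        fmt.Println("at least one server could not be queried")
    }
    for _, sc := range servers {
        if sc.Error != nil {
            fmt.Println("error:", sc.Error)
            continue
        }
        for _, cl := range sc.Clients {
            fmt.Printf("%s sid=%#x lastOp=%s\n", cl.Addr, cl.SessionID, cl.LastOperation)
        }
    }
}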
func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) {
conn, err := net.DialTimeout("tcp", server, timeout)
if err != nil {
return nil, err
}
// the zookeeper server should automatically close this socket
// once the command has been processed, but better safe than sorry
defer conn.Close()
conn.SetWriteDeadline(time.Now().Add(timeout))
_, err = conn.Write([]byte(command))
if err != nil {
return nil, err
}
conn.SetReadDeadline(time.Now().Add(timeout))
resp, err := ioutil.ReadAll(conn)
if err != nil {
return nil, err
}
return resp, nil
}

View File

@ -1,142 +0,0 @@
package zk
import (
"errors"
"fmt"
"strconv"
"strings"
)
var (
// ErrDeadlock is returned by Lock when trying to lock twice without unlocking first
ErrDeadlock = errors.New("zk: trying to acquire a lock twice")
// ErrNotLocked is returned by Unlock when trying to release a lock that has not first been acquired.
ErrNotLocked = errors.New("zk: not locked")
)
// Lock is a mutual exclusion lock.
type Lock struct {
c *Conn
path string
acl []ACL
lockPath string
seq int
}
// NewLock creates a new lock instance using the provided connection, path, and acl.
// The path must be a node that is only used by this lock. A lock instance starts
// unlocked until Lock() is called.
func NewLock(c *Conn, path string, acl []ACL) *Lock {
return &Lock{
c: c,
path: path,
acl: acl,
}
}
func parseSeq(path string) (int, error) {
parts := strings.Split(path, "-")
return strconv.Atoi(parts[len(parts)-1])
}
// Lock attempts to acquire the lock. It will wait to return until the lock
// is acquired or an error occurs. If this instance already has the lock
// then ErrDeadlock is returned.
func (l *Lock) Lock() error {
if l.lockPath != "" {
return ErrDeadlock
}
prefix := fmt.Sprintf("%s/lock-", l.path)
path := ""
var err error
for i := 0; i < 3; i++ {
path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
if err == ErrNoNode {
// Create parent node.
parts := strings.Split(l.path, "/")
pth := ""
for _, p := range parts[1:] {
pth += "/" + p
_, err := l.c.Create(pth, []byte{}, 0, l.acl)
if err != nil && err != ErrNodeExists {
return err
}
}
} else if err == nil {
break
} else {
return err
}
}
if err != nil {
return err
}
seq, err := parseSeq(path)
if err != nil {
return err
}
for {
children, _, err := l.c.Children(l.path)
if err != nil {
return err
}
lowestSeq := seq
prevSeq := 0
prevSeqPath := ""
for _, p := range children {
s, err := parseSeq(p)
if err != nil {
return err
}
if s < lowestSeq {
lowestSeq = s
}
if s < seq && s > prevSeq {
prevSeq = s
prevSeqPath = p
}
}
if seq == lowestSeq {
// Acquired the lock
break
}
// Wait on the node next in line for the lock
_, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath)
if err != nil && err != ErrNoNode {
return err
} else if err != nil && err == ErrNoNode {
// try again
continue
}
ev := <-ch
if ev.Err != nil {
return ev.Err
}
}
l.seq = seq
l.lockPath = path
return nil
}
// Unlock releases an acquired lock. If the lock is not currently acquired by
// this Lock instance, ErrNotLocked is returned.
func (l *Lock) Unlock() error {
if l.lockPath == "" {
return ErrNotLocked
}
if err := l.c.Delete(l.lockPath, -1); err != nil {
return err
}
l.lockPath = ""
l.seq = 0
return nil
}
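// A minimal usage sketch for guarding a critical section with Lock; the lock
// path is an assumption for the example only.
func exampleLock(c *Conn) error {
    l := NewLock(c, "/locks/migration", WorldACL(PermAll))
    if err := l.Lock(); err != nil {
        return err
    }
    defer l.Unlock()
    // ... work that requires mutual exclusion goes here ...
    return nil
}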

View File

@ -1,119 +0,0 @@
package zk
import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"time"
)
type TestServer struct {
Port int
Path string
Srv *Server
}
type TestCluster struct {
Path string
Servers []TestServer
}
func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) {
tmpPath, err := ioutil.TempDir("", "gozk")
if err != nil {
return nil, err
}
success := false
startPort := int(rand.Int31n(6000) + 10000)
cluster := &TestCluster{Path: tmpPath}
defer func() {
if !success {
cluster.Stop()
}
}()
for serverN := 0; serverN < size; serverN++ {
srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN))
if err := os.Mkdir(srvPath, 0700); err != nil {
return nil, err
}
port := startPort + serverN*3
cfg := ServerConfig{
ClientPort: port,
DataDir: srvPath,
}
for i := 0; i < size; i++ {
cfg.Servers = append(cfg.Servers, ServerConfigServer{
ID: i + 1,
Host: "127.0.0.1",
PeerPort: startPort + i*3 + 1,
LeaderElectionPort: startPort + i*3 + 2,
})
}
cfgPath := filepath.Join(srvPath, "zoo.cfg")
fi, err := os.Create(cfgPath)
if err != nil {
return nil, err
}
err = cfg.Marshall(fi)
fi.Close()
if err != nil {
return nil, err
}
fi, err = os.Create(filepath.Join(srvPath, "myid"))
if err != nil {
return nil, err
}
_, err = fmt.Fprintf(fi, "%d\n", serverN+1)
fi.Close()
if err != nil {
return nil, err
}
srv := &Server{
ConfigPath: cfgPath,
Stdout: stdout,
Stderr: stderr,
}
if err := srv.Start(); err != nil {
return nil, err
}
cluster.Servers = append(cluster.Servers, TestServer{
Path: srvPath,
Port: cfg.ClientPort,
Srv: srv,
})
}
success = true
time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify.
return cluster, nil
}
func (ts *TestCluster) Connect(idx int) (*Conn, error) {
zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15)
return zk, err
}
func (ts *TestCluster) ConnectAll() (*Conn, <-chan Event, error) {
return ts.ConnectAllTimeout(time.Second * 15)
}
func (ts *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
hosts := make([]string, len(ts.Servers))
for i, srv := range ts.Servers {
hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port)
}
zk, ch, err := Connect(hosts, sessionTimeout)
return zk, ch, err
}
func (ts *TestCluster) Stop() error {
for _, srv := range ts.Servers {
srv.Srv.Stop()
}
defer os.RemoveAll(ts.Path)
return nil
}
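// A minimal usage sketch: spinning up a three-node test cluster and running a
// smoke test against it. The znode path is an assumption for the example, and
// Conn.Close is assumed to be the usual connection teardown from the rest of
// this package.
func exampleTestCluster() error {
    ts, err := StartTestCluster(3, os.Stdout, os.Stderr)
    if err != nil {
        return err
    }
    defer ts.Stop()
    conn, _, err := ts.ConnectAll()
    if err != nil {
        return err
    }
    defer conn.Close()
    _, err = conn.Create("/smoke", []byte("ok"), 0, WorldACL(PermAll))
    return err
}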

View File

@ -1,136 +0,0 @@
package zk
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
)
type ErrMissingServerConfigField string
func (e ErrMissingServerConfigField) Error() string {
return fmt.Sprintf("zk: missing server config field '%s'", string(e))
}
const (
DefaultServerTickTime = 2000
DefaultServerInitLimit = 10
DefaultServerSyncLimit = 5
DefaultServerAutoPurgeSnapRetainCount = 3
DefaultPeerPort = 2888
DefaultLeaderElectionPort = 3888
)
type ServerConfigServer struct {
ID int
Host string
PeerPort int
LeaderElectionPort int
}
type ServerConfig struct {
TickTime int // Number of milliseconds of each tick
InitLimit int // Number of ticks that the initial synchronization phase can take
SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement
DataDir string // Directory where the snapshot is stored
ClientPort int // Port at which clients will connect
AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir
AutoPurgePurgeInterval int // Purge task interval in hours (0 to disable auto purge)
Servers []ServerConfigServer
}
func (sc ServerConfig) Marshall(w io.Writer) error {
if sc.DataDir == "" {
return ErrMissingServerConfigField("dataDir")
}
fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir)
if sc.TickTime <= 0 {
sc.TickTime = DefaultServerTickTime
}
fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime)
if sc.InitLimit <= 0 {
sc.InitLimit = DefaultServerInitLimit
}
fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit)
if sc.SyncLimit <= 0 {
sc.SyncLimit = DefaultServerSyncLimit
}
fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit)
if sc.ClientPort <= 0 {
sc.ClientPort = DefaultPort
}
fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort)
if sc.AutoPurgePurgeInterval > 0 {
if sc.AutoPurgeSnapRetainCount <= 0 {
sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount
}
fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount)
fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval)
}
if len(sc.Servers) > 0 {
for _, srv := range sc.Servers {
if srv.PeerPort <= 0 {
srv.PeerPort = DefaultPeerPort
}
if srv.LeaderElectionPort <= 0 {
srv.LeaderElectionPort = DefaultLeaderElectionPort
}
fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort)
}
}
return nil
}
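// A minimal usage sketch for writing a standalone zoo.cfg. With only DataDir
// set, the remaining fields fall back to the defaults above (tickTime=2000,
// initLimit=10, syncLimit=5, clientPort=2181); the data directory is an
// assumption for the example only.
func exampleMarshallConfig() error {
    cfg := ServerConfig{DataDir: "/tmp/zk-data"}
    return cfg.Marshall(os.Stdout)
}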
var jarSearchPaths = []string{
"zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"/usr/share/java/zookeeper-*.jar",
"/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
}
func findZookeeperFatJar() string {
var paths []string
zkPath := os.Getenv("ZOOKEEPER_PATH")
if zkPath == "" {
paths = jarSearchPaths
} else {
paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
}
for _, path := range paths {
matches, _ := filepath.Glob(path)
// TODO: could sort by version and pick latest
if len(matches) > 0 {
return matches[0]
}
}
return ""
}
type Server struct {
JarPath string
ConfigPath string
Stdout, Stderr io.Writer
cmd *exec.Cmd
}
func (srv *Server) Start() error {
if srv.JarPath == "" {
srv.JarPath = findZookeeperFatJar()
if srv.JarPath == "" {
return fmt.Errorf("zk: unable to find server jar")
}
}
srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
srv.cmd.Stdout = srv.Stdout
srv.cmd.Stderr = srv.Stderr
return srv.cmd.Start()
}
func (srv *Server) Stop() error {
srv.cmd.Process.Signal(os.Kill)
return srv.cmd.Wait()
}

View File

@ -1,640 +0,0 @@
package zk
import (
"encoding/binary"
"errors"
"log"
"reflect"
"runtime"
"time"
)
var (
ErrUnhandledFieldType = errors.New("zk: unhandled field type")
ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct")
ErrShortBuffer = errors.New("zk: buffer too small")
)
type defaultLogger struct{}
func (defaultLogger) Printf(format string, a ...interface{}) {
log.Printf(format, a...)
}
type ACL struct {
Perms int32
Scheme string
ID string
}
type Stat struct {
Czxid int64 // The zxid of the change that caused this znode to be created.
Mzxid int64 // The zxid of the change that last modified this znode.
Ctime int64 // The time in milliseconds from epoch when this znode was created.
Mtime int64 // The time in milliseconds from epoch when this znode was last modified.
Version int32 // The number of changes to the data of this znode.
Cversion int32 // The number of changes to the children of this znode.
Aversion int32 // The number of changes to the ACL of this znode.
EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
DataLength int32 // The length of the data field of this znode.
NumChildren int32 // The number of children of this znode.
Pzxid int64 // last modified children
}
// ServerClient is the information for a single Zookeeper client and its session.
// This is used to parse/extract the output of the `cons` command.
type ServerClient struct {
Queued int64
Received int64
Sent int64
SessionID int64
Lcxid int64
Lzxid int64
Timeout int32
LastLatency int32
MinLatency int32
AvgLatency int32
MaxLatency int32
Established time.Time
LastResponse time.Time
Addr string
LastOperation string // maybe?
Error error
}
// ServerClients is a struct for the FLWCons() function. It's used to provide
// the list of Clients.
//
// This is needed because FLWCons() takes multiple servers.
type ServerClients struct {
Clients []*ServerClient
Error error
}
// ServerStats is the information pulled from the Zookeeper `srvr` command.
type ServerStats struct {
Sent int64
Received int64
NodeCount int64
MinLatency int64
AvgLatency int64
MaxLatency int64
Connections int64
Outstanding int64
Epoch int32
Counter int32
BuildTime time.Time
Mode Mode
Version string
Error error
}
type requestHeader struct {
Xid int32
Opcode int32
}
type responseHeader struct {
Xid int32
Zxid int64
Err ErrCode
}
type multiHeader struct {
Type int32
Done bool
Err ErrCode
}
type auth struct {
Type int32
Scheme string
Auth []byte
}
// Generic request structs
type pathRequest struct {
Path string
}
type PathVersionRequest struct {
Path string
Version int32
}
type pathWatchRequest struct {
Path string
Watch bool
}
type pathResponse struct {
Path string
}
type statResponse struct {
Stat Stat
}
//
type CheckVersionRequest PathVersionRequest
type closeRequest struct{}
type closeResponse struct{}
type connectRequest struct {
ProtocolVersion int32
LastZxidSeen int64
TimeOut int32
SessionID int64
Passwd []byte
}
type connectResponse struct {
ProtocolVersion int32
TimeOut int32
SessionID int64
Passwd []byte
}
type CreateRequest struct {
Path string
Data []byte
Acl []ACL
Flags int32
}
type createResponse pathResponse
type DeleteRequest PathVersionRequest
type deleteResponse struct{}
type errorResponse struct {
Err int32
}
type existsRequest pathWatchRequest
type existsResponse statResponse
type getAclRequest pathRequest
type getAclResponse struct {
Acl []ACL
Stat Stat
}
type getChildrenRequest pathRequest
type getChildrenResponse struct {
Children []string
}
type getChildren2Request pathWatchRequest
type getChildren2Response struct {
Children []string
Stat Stat
}
type getDataRequest pathWatchRequest
type getDataResponse struct {
Data []byte
Stat Stat
}
type getMaxChildrenRequest pathRequest
type getMaxChildrenResponse struct {
Max int32
}
type getSaslRequest struct {
Token []byte
}
type pingRequest struct{}
type pingResponse struct{}
type setAclRequest struct {
Path string
Acl []ACL
Version int32
}
type setAclResponse statResponse
type SetDataRequest struct {
Path string
Data []byte
Version int32
}
type setDataResponse statResponse
type setMaxChildren struct {
Path string
Max int32
}
type setSaslRequest struct {
Token string
}
type setSaslResponse struct {
Token string
}
type setWatchesRequest struct {
RelativeZxid int64
DataWatches []string
ExistWatches []string
ChildWatches []string
}
type setWatchesResponse struct{}
type syncRequest pathRequest
type syncResponse pathResponse
type setAuthRequest auth
type setAuthResponse struct{}
type multiRequestOp struct {
Header multiHeader
Op interface{}
}
type multiRequest struct {
Ops []multiRequestOp
DoneHeader multiHeader
}
type multiResponseOp struct {
Header multiHeader
String string
Stat *Stat
}
type multiResponse struct {
Ops []multiResponseOp
DoneHeader multiHeader
}
func (r *multiRequest) Encode(buf []byte) (int, error) {
total := 0
for _, op := range r.Ops {
op.Header.Done = false
n, err := encodePacketValue(buf[total:], reflect.ValueOf(op))
if err != nil {
return total, err
}
total += n
}
r.DoneHeader.Done = true
n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader))
if err != nil {
return total, err
}
total += n
return total, nil
}
func (r *multiRequest) Decode(buf []byte) (int, error) {
r.Ops = make([]multiRequestOp, 0)
r.DoneHeader = multiHeader{-1, true, -1}
total := 0
for {
header := &multiHeader{}
n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
if err != nil {
return total, err
}
total += n
if header.Done {
r.DoneHeader = *header
break
}
req := requestStructForOp(header.Type)
if req == nil {
return total, ErrAPIError
}
n, err = decodePacketValue(buf[total:], reflect.ValueOf(req))
if err != nil {
return total, err
}
total += n
r.Ops = append(r.Ops, multiRequestOp{*header, req})
}
return total, nil
}
func (r *multiResponse) Decode(buf []byte) (int, error) {
r.Ops = make([]multiResponseOp, 0)
r.DoneHeader = multiHeader{-1, true, -1}
total := 0
for {
header := &multiHeader{}
n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
if err != nil {
return total, err
}
total += n
if header.Done {
r.DoneHeader = *header
break
}
res := multiResponseOp{Header: *header}
var w reflect.Value
switch header.Type {
default:
return total, ErrAPIError
case opCreate:
w = reflect.ValueOf(&res.String)
case opSetData:
res.Stat = new(Stat)
w = reflect.ValueOf(res.Stat)
case opCheck, opDelete:
}
if w.IsValid() {
n, err := decodePacketValue(buf[total:], w)
if err != nil {
return total, err
}
total += n
}
r.Ops = append(r.Ops, res)
}
return total, nil
}
type watcherEvent struct {
Type EventType
State State
Path string
}
type decoder interface {
Decode(buf []byte) (int, error)
}
type encoder interface {
Encode(buf []byte) (int, error)
}
func decodePacket(buf []byte, st interface{}) (n int, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
err = ErrShortBuffer
} else {
panic(r)
}
}
}()
v := reflect.ValueOf(st)
if v.Kind() != reflect.Ptr || v.IsNil() {
return 0, ErrPtrExpected
}
return decodePacketValue(buf, v)
}
func decodePacketValue(buf []byte, v reflect.Value) (int, error) {
rv := v
kind := v.Kind()
if kind == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
kind = v.Kind()
}
n := 0
switch kind {
default:
return n, ErrUnhandledFieldType
case reflect.Struct:
if de, ok := rv.Interface().(decoder); ok {
return de.Decode(buf)
} else if de, ok := v.Interface().(decoder); ok {
return de.Decode(buf)
} else {
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
n2, err := decodePacketValue(buf[n:], field)
n += n2
if err != nil {
return n, err
}
}
}
case reflect.Bool:
v.SetBool(buf[n] != 0)
n++
case reflect.Int32:
v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4])))
n += 4
case reflect.Int64:
v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8])))
n += 8
case reflect.String:
ln := int(binary.BigEndian.Uint32(buf[n : n+4]))
v.SetString(string(buf[n+4 : n+4+ln]))
n += 4 + ln
case reflect.Slice:
switch v.Type().Elem().Kind() {
default:
count := int(binary.BigEndian.Uint32(buf[n : n+4]))
n += 4
values := reflect.MakeSlice(v.Type(), count, count)
v.Set(values)
for i := 0; i < count; i++ {
n2, err := decodePacketValue(buf[n:], values.Index(i))
n += n2
if err != nil {
return n, err
}
}
case reflect.Uint8:
ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4])))
if ln < 0 {
n += 4
v.SetBytes(nil)
} else {
bytes := make([]byte, ln)
copy(bytes, buf[n+4:n+4+ln])
v.SetBytes(bytes)
n += 4 + ln
}
}
}
return n, nil
}
func encodePacket(buf []byte, st interface{}) (n int, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
err = ErrShortBuffer
} else {
panic(r)
}
}
}()
v := reflect.ValueOf(st)
if v.Kind() != reflect.Ptr || v.IsNil() {
return 0, ErrPtrExpected
}
return encodePacketValue(buf, v)
}
func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
rv := v
for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
v = v.Elem()
}
n := 0
switch v.Kind() {
default:
return n, ErrUnhandledFieldType
case reflect.Struct:
if en, ok := rv.Interface().(encoder); ok {
return en.Encode(buf)
} else if en, ok := v.Interface().(encoder); ok {
return en.Encode(buf)
} else {
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
n2, err := encodePacketValue(buf[n:], field)
n += n2
if err != nil {
return n, err
}
}
}
case reflect.Bool:
if v.Bool() {
buf[n] = 1
} else {
buf[n] = 0
}
n++
case reflect.Int32:
binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
n += 4
case reflect.Int64:
binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
n += 8
case reflect.String:
str := v.String()
binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
copy(buf[n+4:n+4+len(str)], []byte(str))
n += 4 + len(str)
case reflect.Slice:
switch v.Type().Elem().Kind() {
default:
count := v.Len()
startN := n
n += 4
for i := 0; i < count; i++ {
n2, err := encodePacketValue(buf[n:], v.Index(i))
n += n2
if err != nil {
return n, err
}
}
binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
case reflect.Uint8:
if v.IsNil() {
binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
n += 4
} else {
bytes := v.Bytes()
binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
copy(buf[n+4:n+4+len(bytes)], bytes)
n += 4 + len(bytes)
}
}
}
return n, nil
}
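// A minimal sketch of the reflection-based codec: it round-trips a request
// struct as length-prefixed big-endian fields. The 64-byte buffer is an
// arbitrary size that comfortably fits this small request.
func exampleRoundTrip() error {
    buf := make([]byte, 64)
    n, err := encodePacket(buf, &pathRequest{Path: "/demo"})
    if err != nil {
        return err
    }
    var out pathRequest
    if _, err := decodePacket(buf[:n], &out); err != nil {
        return err
    }
    // out.Path is "/demo" again: a 4-byte length followed by the string bytes.
    return nil
}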
func requestStructForOp(op int32) interface{} {
switch op {
case opClose:
return &closeRequest{}
case opCreate:
return &CreateRequest{}
case opDelete:
return &DeleteRequest{}
case opExists:
return &existsRequest{}
case opGetAcl:
return &getAclRequest{}
case opGetChildren:
return &getChildrenRequest{}
case opGetChildren2:
return &getChildren2Request{}
case opGetData:
return &getDataRequest{}
case opPing:
return &pingRequest{}
case opSetAcl:
return &setAclRequest{}
case opSetData:
return &SetDataRequest{}
case opSetWatches:
return &setWatchesRequest{}
case opSync:
return &syncRequest{}
case opSetAuth:
return &setAuthRequest{}
case opCheck:
return &CheckVersionRequest{}
case opMulti:
return &multiRequest{}
}
return nil
}
func responseStructForOp(op int32) interface{} {
switch op {
case opClose:
return &closeResponse{}
case opCreate:
return &createResponse{}
case opDelete:
return &deleteResponse{}
case opExists:
return &existsResponse{}
case opGetAcl:
return &getAclResponse{}
case opGetChildren:
return &getChildrenResponse{}
case opGetChildren2:
return &getChildren2Response{}
case opGetData:
return &getDataResponse{}
case opPing:
return &pingResponse{}
case opSetAcl:
return &setAclResponse{}
case opSetData:
return &setDataResponse{}
case opSetWatches:
return &setWatchesResponse{}
case opSync:
return &syncResponse{}
case opWatcherEvent:
return &watcherEvent{}
case opSetAuth:
return &setAuthResponse{}
// case opCheck:
// return &checkVersionResponse{}
case opMulti:
return &multiResponse{}
}
return nil
}

View File

@ -1,148 +0,0 @@
package zk
import (
"encoding/binary"
"fmt"
"io"
"net"
"sync"
)
var (
requests = make(map[int32]int32) // Map of Xid -> Opcode
requestsLock = &sync.Mutex{}
)
func trace(conn1, conn2 net.Conn, client bool) {
defer conn1.Close()
defer conn2.Close()
buf := make([]byte, 10*1024)
init := true
for {
_, err := io.ReadFull(conn1, buf[:4])
if err != nil {
fmt.Println("1>", client, err)
return
}
blen := int(binary.BigEndian.Uint32(buf[:4]))
_, err = io.ReadFull(conn1, buf[4:4+blen])
if err != nil {
fmt.Println("2>", client, err)
return
}
var cr interface{}
opcode := int32(-1)
readHeader := true
if client {
if init {
cr = &connectRequest{}
readHeader = false
} else {
xid := int32(binary.BigEndian.Uint32(buf[4:8]))
opcode = int32(binary.BigEndian.Uint32(buf[8:12]))
requestsLock.Lock()
requests[xid] = opcode
requestsLock.Unlock()
cr = requestStructForOp(opcode)
if cr == nil {
fmt.Printf("Unknown opcode %d\n", opcode)
}
}
} else {
if init {
cr = &connectResponse{}
readHeader = false
} else {
xid := int32(binary.BigEndian.Uint32(buf[4:8]))
zxid := int64(binary.BigEndian.Uint64(buf[8:16]))
errnum := int32(binary.BigEndian.Uint32(buf[16:20]))
if xid != -1 || zxid != -1 {
requestsLock.Lock()
found := false
opcode, found = requests[xid]
if !found {
opcode = 0
}
delete(requests, xid)
requestsLock.Unlock()
} else {
opcode = opWatcherEvent
}
cr = responseStructForOp(opcode)
if cr == nil {
fmt.Printf("Unknown opcode %d\n", opcode)
}
if errnum != 0 {
cr = &struct{}{}
}
}
}
opname := "."
if opcode != -1 {
opname = opNames[opcode]
}
if cr == nil {
fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen])
} else {
n := 4
hdrStr := ""
if readHeader {
var hdr interface{}
if client {
hdr = &requestHeader{}
} else {
hdr = &responseHeader{}
}
if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil {
fmt.Println(err)
} else {
n += n2
}
hdrStr = fmt.Sprintf(" %+v", hdr)
}
if _, err := decodePacket(buf[n:n+blen], cr); err != nil {
fmt.Println(err)
}
fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr)
}
init = false
written, err := conn2.Write(buf[:4+blen])
if err != nil {
fmt.Println("3>", client, err)
return
} else if written != 4+blen {
fmt.Printf("Written != read: %d != %d\n", written, blen)
return
}
}
}
func handleConnection(addr string, conn net.Conn) {
zkConn, err := net.Dial("tcp", addr)
if err != nil {
fmt.Println(err)
return
}
go trace(conn, zkConn, true)
trace(zkConn, conn, false)
}
func StartTracer(listenAddr, serverAddr string) {
ln, err := net.Listen("tcp", listenAddr)
if err != nil {
panic(err)
}
for {
conn, err := ln.Accept()
if err != nil {
fmt.Println(err)
continue
}
go handleConnection(serverAddr, conn)
}
}
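// A minimal usage sketch for running the tracer as a local proxy; the
// addresses are assumptions for the example only. Clients pointed at :2182
// have their packets decoded and printed before being forwarded to the real
// server on :2181.
func exampleTracer() {
    StartTracer("127.0.0.1:2182", "127.0.0.1:2181")
}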

View File

@ -1,54 +0,0 @@
package zk
import (
"crypto/sha1"
"encoding/base64"
"fmt"
"math/rand"
"strconv"
"strings"
)
// AuthACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "auth", and ID "", which is used
// by ZooKeeper to represent any authenticated user.
func AuthACL(perms int32) []ACL {
return []ACL{{perms, "auth", ""}}
}
// WorldACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "world", and ID "anyone", which
// is used by ZooKeeper to represent any user at all.
func WorldACL(perms int32) []ACL {
return []ACL{{perms, "world", "anyone"}}
}
func DigestACL(perms int32, user, password string) []ACL {
userPass := []byte(fmt.Sprintf("%s:%s", user, password))
h := sha1.New()
if n, err := h.Write(userPass); err != nil || n != len(userPass) {
panic("SHA1 failed")
}
digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
}
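// A minimal sketch of combining the ACL helpers; the user/password pair is an
// assumption for the example only.
func exampleACLs() []ACL {
    // World-readable, but only the "admin" digest identity gets full access.
    acl := WorldACL(PermRead)
    return append(acl, DigestACL(PermAll, "admin", "secret")...)
}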
// FormatServers takes a slice of addresses, and makes sure they are in a format
// that resembles <addr>:<port>. If the server has no port provided, the
// DefaultPort constant is added to the end.
func FormatServers(servers []string) []string {
for i := range servers {
if !strings.Contains(servers[i], ":") {
servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
}
}
return servers
}
// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
func stringShuffle(s []string) {
for i := len(s) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
s[i], s[j] = s[j], s[i]
}
}