From 366c1d0c6c77d8190c7494c6e901d33e900ceeff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Mert=20Y=C4=B1ld=C4=B1ran?=
Date: Thu, 2 Sep 2021 14:34:06 +0300
Subject: [PATCH] Refactor Mizu, define an extension API and add new protocols:
 AMQP, Kafka (#224)

* Separate HTTP-related code into `extensions/http` as a Go plugin
* Move the `extensions` folder into the `tap` folder
* Move the HTTP files into `tap/extensions/lib` for now
* Replace `orcaman/concurrent-map` with `sync.Map`
* Remove `grpc_assembler.go`
* Remove `github.com/up9inc/mizu/tap/extensions/http/lib`
* Add a build script to automatically build extensions from a known path and load them
* Start to define the extension API
* Implement the `run()` function for the TCP stream
* Add support for defining multiple ports in the extension API
* Set the extension name inside the extension
* Declare the `Dissect` function in the extension API
* Dissect the HTTP request from inside the HTTP extension
* Distinguish between outbound and inbound ports
* Dissect the HTTP response from inside the HTTP extension
* Bring back the HTTP request-response pair matcher
* Return a `*api.RequestResponsePair` from the dissection
* Bring back the gRPC-HTTP/2 parser
* Fix the issues in `handleHTTP1ClientStream` and `handleHTTP1ServerStream`
* Call a function pointer to emit dissected data back to the `tap` package
* roee changes - trying to fix the agent to work with the "api" object - ***still not working***
* Fix a small mistake in the merge conflicts
* Fix the issues introduced by the merge conflict
* Add an `Emitter` interface to the API and send `OutputChannelItem`(s) to `OutputChannel`
* Fix the `HTTP1` handlers
* Set `ConnectionInfo` in the HTTP handlers
* Fix the `Dockerfile` to build the extensions
* Remove some unwanted code
* no message
* Re-enable the `getStreamProps` function
* Migrate back from `gopacket/tcpassembly` to `gopacket/reassembly`
* Introduce the `HTTPPayload` struct and the `HTTPPayloader` interface to `MarshalJSON()` all the data structures that are returned by the HTTP protocol
* Read `socketHarOutChannel` instead of `filteredHarChannel`
* Connect `OutputChannelItem` to the last WebSocket, which means the web UI finally works again
* Add `.env.example` to the React app
* Marshal and unmarshal `*http.Request`, `*http.Response` pairs
* Move `loadExtensions` into `main.go` and map extensions into `extensionsMap`
* Add a `Summarize()` method to the `Dissector` interface
* Add an `Analyze` method to the `Dissector` interface and `MizuEntry` to the extension API
* Add a `Protocol` struct and make it affect the UI
* Refactor the `BaseEntryDetails` struct and display the source and destination ports in the UI
* Display the protocol name inside the details layout
* Add a `Represent` method to the `Dissector` interface and manipulate the UI through this method
* Make the protocol color affect the details layout color and write the protocol abbreviation vertically
* Remove everything HTTP-related from the `tap` package and make the extension system fully functional
* Fix the TypeScript warnings
* Bring the files related to AMQP into the `amqp` directory
* Add the `--nodefrag` flag to the tapper and bring in the main AMQP code
* Implement the AMQP `BasicPublish` and fix some issues in the UI when the response payload is missing
* Implement the `representBasicPublish` method
* Fix several minor issues
* Implement the AMQP `BasicDeliver`
* Implement the AMQP `QueueDeclare`
* Implement the AMQP `ExchangeDeclare`
* Implement the AMQP `ConnectionStart`
* Implement the AMQP `ConnectionClose`
* Implement the AMQP `QueueBind`
* Implement the AMQP `BasicConsume`
* Fix an issue in `ConnectionStart`
* Fix a linter error
* Bring the files related to Kafka into the `kafka` directory
* Fix the build errors in the Kafka Go files
* Implement the `Dissect` method of Kafka and adapt the request-response pair matcher to the asynchronous client-server stream
* Do the "Is reversed?" check inside `getStreamProps` and fix an issue in the Kafka `Dissect` method
* Implement the `Analyze` and `Summarize` methods of Kafka
* Implement the representations for Kafka `Metadata`, `RequestHeader` and `ResponseHeader`
* Refactor the AMQP and Kafka implementations to create the summary string only inside the `Analyze` method
* Implement the representations for Kafka `ApiVersions`
* Implement the representations for Kafka `Produce`
* Implement the representations for Kafka `Fetch`
* Implement the representations for Kafka `ListOffsets`, `CreateTopics` and `DeleteTopics`
* Fix the encoding of the AMQP `BasicPublish` and `BasicDeliver` body
* Remove the unnecessary logging
* Remove more logging
* Introduce a `Version` field in the `Protocol` struct for dynamically switching the HTTP protocol to HTTP/2
* Fix the issues in the analysis and representation of the HTTP/2 (gRPC) protocol
* Fix the issues in the summary section of the details layout for the HTTP/2 (gRPC) protocol
* Fix the read errors that freeze the sniffer in HTTP and Kafka
* Fix the issues in the HTTP POST data
* Fix one more issue in the HTTP POST data
* Fix an infinite loop in Kafka
* Fix another freezing issue in Kafka
* Revert "UI Infra - Support multiple entry types + refactoring (#211)"
  This reverts commit f74a52d4dcba55aca0917f9deaf7da55a20a6f8e.
* Fix more issues introduced by the merge
* Fix the status code in the summary section
* Add the cleaner again (why was it removed?) and add a TODO on the extension loop
* fix dockerfile (remove deleting .env file) - it is found in dockerignore and fails to build if the file not exists
* Fix `GetEntries` (the "/entries" endpoint) - work with `tapApi.BaseEntryDetails` (moved from shared)
* Fix an issue in the UI summary section
* Refactor the protocol payload structs
* Fix a log message in the passive tapper
* Adapt the `APP_PORTS` environment variable to the new extension system and change its format to `APP_PORTS='{"http": ["8001"]}'`
* Revert "fix dockerfile (remove deleting .env file) - it is found in dockerignore and fails to build if the file not exists"
  This reverts commit 4f514ae1f478b1823ab5383abe7fb48fcd2d2983.
* Bring in the necessary changes from f74a52d4dcba55aca0917f9deaf7da55a20a6f8e
* Open the API server URL in the web browser as soon as Mizu is ready
* Make the TCP reader consist of a single goroutine (instead of two) and try to dissect in both client and server mode by rewinding
* Swap `TcpID` without overwriting it
* Sort the extensions by priority
* Try to dissect by looping through all the extensions
* Fix the `getStreamProps` function (it should be passed from the CLI as it was before)
* Turn the TCP reader back into two goroutines (client and server)
* Fix a typo
* Learn `isClient` from the TCP stream
* Set the `viewer` style to `overflow: "auto"`
* Fix the memory leaks in the AMQP and Kafka dissectors
* Revert some of the changes in be7c65eb6d3fb657a059707da3ca559937e59739
* Remove `allExtensionPorts` since it's no longer needed
* Remove `APP_PORTS` since it's no longer needed
* Fix all of the minor issues in the React code
* Check the Kafka header size and fail fast
* Break the dissector loop upon a successful dissection
* Don't break the dissector loop; protocols might collide
* Improve the HTTP request-response counter (still not perfect)
* Make the HTTP request-response counter perfect
* Revert "Revert some of the changes in be7c65eb6d3fb657a059707da3ca559937e59739"
  This reverts commit 08e7d786d8d011d4c74a0835dcd91a39617783a2.
* Bring back the `filterItems` and `isHealthCheckByUserAgent` functions
* Remove some development artifacts
* Remove unused and commented lines that are no longer relevant
* Fix the performance of the TCP stream factory; make it create two `tcpReader`(s) per extension
* Change a log to debug level
* Make `*api.CounterPair` a field of `tcpReader`
* Set `isTapTarget` to always `true` again since the `filterAuthorities` implementation has problems
* Remove a variable that's only used for logging, even though it was not introduced by this branch
* Bring back the `NumberOfRules` field of the `ApplicableRules` struct
* Remove the unused `NewEntry` function
* Move the `k8sResolver == nil` check to a more appropriate place
* The default `healthChecksUserAgentHeaders` should be an empty array (like the default config value)
* Remove a spammy `console.log`
* Fix the Rules button causing the app to crash (it accessed the service via an incorrect property)
* Ignore all `.env*` files in the Docker build
* Better caching in the Dockerfile: only copy `go.mod` files before `go mod download`
* Check for errors while loading an extension
* Add a comment about why `Protocol` is not a pointer
* Bring back the call to `deleteOlderThan`
* Remove the `nil` check
* Reduce the maximum allowed AMQP message size from 128MB to 1MB
* Fix an error that only occurs when a Kafka broker is initiating
* Revert the change in b2abd7b990eb6bd6b14372b2f14f34e28248d572
* Fix the service name resolution in all protocols
* Remove the `anydirection` flag and fix the issue in `filterAuthorities`
* Pass the `sync.Map` by reference to the `deleteOlderThan` method
* Fix the packet capture issue in standalone mode that was introduced by the removal of `anydirection`
* Temporarily resolve the memory exhaustion in AMQP
* Fix a nil pointer dereference error
* Fix the CLI build error
* Fix a memory leak identified by `pprof`

Co-authored-by: Roee Gadot
Co-authored-by: Nimrod Gilboa Markevich
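For orientation before the diff: the heart of this patch is the new extension API in `tap/api/api.go`, centered on a `Dissector` interface that each protocol plugin implements and an `Emitter` that hands dissected traffic back to the tap. The Go sketch below is reconstructed from the call sites visible in this diff (`Register`, `Analyze`, `Summarize`, `Represent`); the `Dissect` signature and the exact struct fields are assumptions, and the authoritative definitions live in `tap/api/api.go`.

// Sketch only; see tap/api/api.go in this patch for the real definitions.
package api

import (
	"bufio"
	"plugin"
)

// Protocol describes a dissector to the UI (name, version, colors, ports).
type Protocol struct {
	Name     string
	Version  string
	Priority int // lower value = tried earlier in the dissector loop
	Ports    []string
}

// Placeholder types; the real structs carry many more fields.
type TcpID struct{ /* source/destination IPs and ports */ }
type OutputChannelItem struct{ /* protocol, timestamp, connection info, request-response pair */ }
type MizuEntry struct{ /* the row persisted to SQLite via GORM */ }
type BaseEntryDetails struct{ /* the summary pushed to the UI over WebSocket */ }

// Emitter lets a dissector hand dissected items back to the tap package.
type Emitter interface {
	Emit(item *OutputChannelItem)
}

// Dissector is the symbol every protocol plugin exports as `Dissector`.
type Dissector interface {
	Register(*Extension) // fill in Protocol, ports, etc.
	Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, emitter Emitter) error // assumed signature
	Analyze(item *OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *MizuEntry
	Summarize(entry *MizuEntry) *BaseEntryDetails
	Represent(entry *MizuEntry) (Protocol, []byte, error)
}

// Extension is one loaded protocol plugin (HTTP, AMQP or Kafka).
type Extension struct {
	Protocol  Protocol
	Path      string
	Plug      *plugin.Plugin
	Dissector Dissector
}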
---
 .dockerignore | 2 +-
 .gitignore | 7 +
 Dockerfile | 6 +-
 Makefile | 6 +-
 README.md | 1 -
 agent/go.mod | 6 +-
 agent/go.sum | 19 +-
 agent/main.go | 172 +-
 agent/pkg/api/main.go | 86 +-
 agent/pkg/api/socket_server_handlers.go | 8 +-
 agent/pkg/controllers/entries_controller.go | 44 +-
 agent/pkg/database/main.go | 24 +-
 agent/pkg/database/size_enforcer.go | 15 +-
 agent/pkg/models/models.go | 87 +-
 agent/pkg/resolver/resolver.go | 3 +-
 .../messageSensitiveDataCleaner.go | 200 -
 build_extensions.sh | 12 +
 cli/cmd/common.go | 29 +-
 cli/cmd/tapRunner.go | 16 +-
 cli/cmd/viewRunner.go | 10 +-
 cli/config/configStructs/tapConfig.go | 9 -
 cli/kubernetes/provider.go | 26 +-
 shared/models.go | 2 +-
 tap/api/api.go | 165 +
 tap/api/go.mod | 3 +
 tap/cleaner.go | 32 +-
 tap/extensions/amqp/go.mod | 9 +
 tap/extensions/amqp/helpers.go | 664 ++++
 tap/extensions/amqp/main.go | 341 ++
 tap/extensions/amqp/read.go | 460 +++
 tap/extensions/amqp/spec091.go | 3307 +++++++++++++++++
 tap/extensions/amqp/structs.go | 17 +
 tap/extensions/amqp/types.go | 431 +++
 tap/extensions/amqp/write.go | 416 +++
 tap/extensions/http/go.mod | 13 +
 tap/extensions/http/go.sum | 12 +
 tap/{ => extensions/http}/grpc_assembler.go | 46 +-
 tap/extensions/http/handlers.go | 164 +
 tap/extensions/http/main.go | 384 ++
 tap/extensions/http/matcher.go | 105 +
 tap/extensions/http/structs.go | 55 +
 tap/extensions/kafka/buffer.go | 645 ++++
 tap/extensions/kafka/cluster.go | 143 +
 tap/extensions/kafka/compression.go | 30 +
 tap/extensions/kafka/decode.go | 598 +++
 tap/extensions/kafka/discard.go | 50 +
 tap/extensions/kafka/encode.go | 645 ++++
 tap/extensions/kafka/error.go | 91 +
 tap/extensions/kafka/go.mod | 10 +
 tap/extensions/kafka/go.sum | 35 +
 tap/extensions/kafka/helpers.go | 648 ++++
 tap/extensions/kafka/main.go | 231 ++
 tap/extensions/kafka/matcher.go | 58 +
 tap/extensions/kafka/protocol.go | 480 +++
 tap/extensions/kafka/protocol_make.go | 219 ++
 tap/extensions/kafka/read.go | 639 ++++
 tap/extensions/kafka/record.go | 314 ++
 tap/extensions/kafka/record_bytes.go | 43 +
 tap/extensions/kafka/reflect.go | 101 +
 tap/extensions/kafka/request.go | 290 ++
 tap/extensions/kafka/response.go | 343 ++
 tap/extensions/kafka/structs.go | 1000 +++++
 tap/go.mod | 13 +-
 tap/go.sum | 34 +-
 tap/har_writer.go | 274 --
 tap/http_matcher.go | 122 -
 tap/http_reader.go | 305 --
 tap/passive_tapper.go | 91 +-
 tap/settings.go | 12 -
 tap/tcp_reader.go | 103 +
 tap/tcp_stream.go | 37 +-
 tap/tcp_stream_factory.go | 149 +-
 ui/.env.example | 2 +
 ui/src/App.sass | 2 +-
 ui/src/App.tsx | 5 +-
 ui/src/components/EntriesList.tsx | 19 +-
 ui/src/components/EntryDetailed.tsx | 72 +
 .../EntryDetailed/EntryDetailed.module.sass | 23 -
 .../EntryDetailed/EntryDetailed.tsx | 56 -
 .../EntryDetailed/EntrySections.module.sass | 3 +-
 .../EntryDetailed/EntrySections.tsx | 160 +-
 .../EntryDetailed/EntryViewer.module.sass | 60 +
 .../components/EntryDetailed/EntryViewer.tsx | 87 +
 .../Kafka/KafkaEntryDetailsContent.tsx | 6 -
 .../Kafka/KafkaEntryDetailsTitle.tsx | 6 -
 .../Rest/RestEntryDetailsContent.tsx | 43 -
 .../Rest/RestEntryDetailsTitle.tsx | 26 -
 .../EntryListItem/EntryListItem.module.sass | 53 +-
 .../EntryListItem/EntryListItem.tsx | 143 +-
 .../EntryListItem/KafkaEntryContent.tsx | 15 -
 .../EntryListItem/RestEntryContent.tsx | 82 -
 ui/src/components/Filters.tsx | 4 +-
 ui/src/components/TrafficPage.tsx | 75 +-
 ui/src/components/UI/EndpointPath.tsx | 6 +-
 ui/src/components/UI/FilterSelect.tsx | 6 +-
 ui/src/components/UI/Protocol.tsx | 53 +
 ui/src/components/UI/StatusCode.tsx | 17 +-
 ui/src/components/UI/Tabs.tsx | 8 +-
 .../UI/style/FilterSelect.module.sass | 4 +-
 .../components/UI/style/Protocol.module.sass | 25 +
 ui/src/components/UI/style/StatusBar.sass | 4 +-
 .../UI/style/StatusCode.module.sass | 5 +-
 ui/src/components/UI/style/misc.module.sass | 4 +-
 ui/src/components/assets/kafkaIcon.svg | 16 -
 ui/src/components/assets/restIcon.svg | 9 -
 .../components/style/EntriesList.module.sass | 2 +-
 ui/src/components/style/Filters.module.sass | 2 +-
 ui/src/components/style/TrafficPage.sass | 26 +-
 ui/src/helpers/api.js | 10 +-
 ui/src/helpers/utils.ts | 35 -
 ui/src/index.sass | 2 +-
 111 files changed, 14396 insertions(+), 1947 deletions(-)
 delete mode 100644 agent/pkg/sensitiveDataFiltering/messageSensitiveDataCleaner.go
 create mode 100755 build_extensions.sh
 create mode 100644 tap/api/api.go
 create mode 100644 tap/api/go.mod
 create mode 100644 tap/extensions/amqp/go.mod
 create mode 100644 tap/extensions/amqp/helpers.go
 create mode 100644 tap/extensions/amqp/main.go
 create mode 100644 tap/extensions/amqp/read.go
 create mode 100644 tap/extensions/amqp/spec091.go
 create mode 100644 tap/extensions/amqp/structs.go
 create mode 100644 tap/extensions/amqp/types.go
 create mode 100644 tap/extensions/amqp/write.go
 create mode 100644 tap/extensions/http/go.mod
 create mode 100644 tap/extensions/http/go.sum
 rename tap/{ => extensions/http}/grpc_assembler.go (87%)
 create mode 100644 tap/extensions/http/handlers.go
 create mode 100644 tap/extensions/http/main.go
 create mode 100644 tap/extensions/http/matcher.go
 create mode 100644 tap/extensions/http/structs.go
 create mode 100644 tap/extensions/kafka/buffer.go
 create mode 100644 tap/extensions/kafka/cluster.go
 create mode 100644 tap/extensions/kafka/compression.go
 create mode 100644 tap/extensions/kafka/decode.go
 create mode 100644 tap/extensions/kafka/discard.go
 create mode 100644 tap/extensions/kafka/encode.go
 create mode 100644 tap/extensions/kafka/error.go
 create mode 100644 tap/extensions/kafka/go.mod
 create mode 100644 tap/extensions/kafka/go.sum
 create mode 100644 tap/extensions/kafka/helpers.go
 create mode 100644 tap/extensions/kafka/main.go
 create mode 100644 tap/extensions/kafka/matcher.go
 create mode 100644 tap/extensions/kafka/protocol.go
 create mode 100644 tap/extensions/kafka/protocol_make.go
 create mode 100644 tap/extensions/kafka/read.go
 create mode 100644 tap/extensions/kafka/record.go
 create mode 100644 tap/extensions/kafka/record_bytes.go
 create mode 100644 tap/extensions/kafka/reflect.go
 create mode 100644 tap/extensions/kafka/request.go
 create mode 100644 tap/extensions/kafka/response.go
 create mode 100644 tap/extensions/kafka/structs.go
 delete mode 100644 tap/har_writer.go
 delete mode 100644 tap/http_matcher.go
 delete mode 100644 tap/http_reader.go
 create mode 100644 tap/tcp_reader.go
 create mode 100644 ui/.env.example
 create mode 100644 ui/src/components/EntryDetailed.tsx
 delete mode 100644 ui/src/components/EntryDetailed/EntryDetailed.module.sass
 delete mode 100644 ui/src/components/EntryDetailed/EntryDetailed.tsx
 create mode 100644 ui/src/components/EntryDetailed/EntryViewer.module.sass
 create mode 100644 ui/src/components/EntryDetailed/EntryViewer.tsx
 delete mode 100644 ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsContent.tsx
 delete mode 100644 ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsTitle.tsx
 delete mode 100644 ui/src/components/EntryDetailed/Rest/RestEntryDetailsContent.tsx
 delete mode 100644 ui/src/components/EntryDetailed/Rest/RestEntryDetailsTitle.tsx
 delete mode 100644 ui/src/components/EntryListItem/KafkaEntryContent.tsx
 delete mode 100644 ui/src/components/EntryListItem/RestEntryContent.tsx
 create mode 100644 ui/src/components/UI/Protocol.tsx
 create mode 100644 ui/src/components/UI/style/Protocol.module.sass
 delete mode 100644 ui/src/components/assets/kafkaIcon.svg
 delete mode 100644 ui/src/components/assets/restIcon.svg
 delete mode 100644 ui/src/helpers/utils.ts
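The extensions listed above are compiled with `go build -buildmode=plugin` (see `build_extensions.sh` further down) and discovered by the agent at startup. The full loader is `loadExtensions` in `agent/main.go` below; this minimal, hypothetical standalone snippet isolates just the plugin mechanics it relies on:

// Minimal sketch of loading one protocol plugin; paths and error handling trimmed.
package main

import (
	"log"
	"plugin"

	tapApi "github.com/up9inc/mizu/tap/api"
)

func loadDissector(path string) tapApi.Dissector {
	plug, err := plugin.Open(path) // dlopen the .so built with -buildmode=plugin
	if err != nil {
		log.Fatal(err)
	}
	sym, err := plug.Lookup("Dissector") // every extension exports this symbol
	if err != nil {
		log.Fatal(err)
	}
	dissector, ok := sym.(tapApi.Dissector) // must satisfy the extension API
	if !ok {
		log.Fatalf("Failed to load the extension: %s", path)
	}
	return dissector
}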
diff --git a/.dockerignore b/.dockerignore
index 464de808c..a668366b5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,7 +2,7 @@
 .dockerignore
 .editorconfig
 .gitignore
-.env.*
+**/.env*
 Dockerfile
 Makefile
 LICENSE
diff --git a/.gitignore b/.gitignore
index cc606960f..59674fecc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,10 @@ build
 
 # Mac OS
 .DS_Store
+.vscode/
+
+# Ignore the scripts that are created for development
+*dev.*
+
+# Environment variables
+.env
diff --git a/Dockerfile b/Dockerfile
index a3864f126..6ccd1b2ff 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,7 +11,7 @@ FROM golang:1.16-alpine AS builder
 # Set necessary environment variables needed for our image.
 ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64
 
-RUN apk add libpcap-dev gcc g++ make
+RUN apk add libpcap-dev gcc g++ make bash
 
 # Move to agent working directory (/agent-build).
 WORKDIR /app/agent-build
@@ -19,6 +19,7 @@ WORKDIR /app/agent-build
 COPY agent/go.mod agent/go.sum ./
 COPY shared/go.mod shared/go.mod ../shared/
 COPY tap/go.mod tap/go.mod ../tap/
+COPY tap/api/go.* ../tap/api/
 RUN go mod download
 # cheap trick to make the build faster (As long as go.mod wasn't changes)
 RUN go list -f '{{.Path}}@{{.Version}}' -m all | sed 1d | grep -e 'go-cache' -e 'sqlite' | xargs go get
@@ -38,6 +39,8 @@
 RUN go build -ldflags="-s -w \
     -X 'mizuserver/pkg/version.BuildTimestamp=${BUILD_TIMESTAMP}' \
     -X 'mizuserver/pkg/version.SemVer=${SEM_VER}'" -o mizuagent .
+COPY build_extensions.sh ..
+RUN cd .. && /bin/bash build_extensions.sh
 
 FROM alpine:3.13.5
 
@@ -46,6 +49,7 @@ WORKDIR /app
 
 # Copy binary and config files from /build to root folder of scratch container.
 COPY --from=builder ["/app/agent-build/mizuagent", "."]
+COPY --from=builder ["/app/agent/build/extensions", "extensions"]
 COPY --from=site-build ["/app/ui-build/build", "site"]
 
 # gin-gonic runs in debug mode without this
diff --git a/Makefile b/Makefile
index 2807c6283..722a5ff98 100644
--- a/Makefile
+++ b/Makefile
@@ -23,7 +23,7 @@ export SEM_VER?=0.0.0
 
 ui: ## Build UI.
 	@(cd ui; npm i ; npm run build; )
-	@ls -l ui/build
+	@ls -l ui/build
 
 cli: ## Build CLI.
 	@echo "building cli"; cd cli && $(MAKE) build
@@ -34,6 +34,7 @@ build-cli-ci: ## Build CLI for CI.
 agent: ## Build agent.
 	@(echo "building mizu agent .." )
 	@(cd agent; go build -o build/mizuagent main.go)
+	${MAKE} extensions
 	@ls -l agent/build
 
 docker: ## Build and publish agent docker image.
@@ -71,6 +72,9 @@ clean-cli: ## Clean CLI.
 clean-docker:
 	@(echo "DOCKER cleanup - NOT IMPLEMENTED YET " )
 
+extensions:
+	./build_extensions.sh
+
 test-cli:
 	@echo "running cli tests"; cd cli && $(MAKE) test
diff --git a/README.md b/README.md
index 4e84d10a3..7dafbe592 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,6 @@ Web interface is now available at http://localhost:8899
 ^C
 ```
 
-
 Any request that contains `User-Agent` header with one of the specified values (`kube-probe` or `prometheus`) will not be captured
 
 ### API Rules validation
diff --git a/agent/go.mod b/agent/go.mod
index 9638a6e30..15884fac8 100644
--- a/agent/go.mod
+++ b/agent/go.mod
@@ -3,7 +3,6 @@ module mizuserver
 go 1.16
 
 require (
-	github.com/beevik/etree v1.1.0
 	github.com/djherbis/atime v1.0.0
 	github.com/fsnotify/fsnotify v1.4.9
 	github.com/gin-contrib/static v0.0.1
@@ -18,8 +17,9 @@
 	github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7
 	github.com/up9inc/mizu/shared v0.0.0
 	github.com/up9inc/mizu/tap v0.0.0
+	github.com/up9inc/mizu/tap/api v0.0.0
 	github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0
-	go.mongodb.org/mongo-driver v1.5.1
+	go.mongodb.org/mongo-driver v1.7.1
 	gorm.io/driver/sqlite v1.1.4
 	gorm.io/gorm v1.21.8
 	k8s.io/api v0.21.0
@@ -30,3 +30,5 @@
 replace github.com/up9inc/mizu/shared v0.0.0 => ../shared
 
 replace github.com/up9inc/mizu/tap v0.0.0 => ../tap
+
+replace github.com/up9inc/mizu/tap/api v0.0.0 => ../tap/api
diff --git a/agent/go.sum b/agent/go.sum
index a0b6f00fa..c49e1f152 100644
--- a/agent/go.sum
+++ b/agent/go.sum
@@ -42,9 +42,6 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
-github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
 github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 h1:NJOOlc6ZJjix0A1rAU+nxruZtR8KboG1848yqpIUo4M=
 github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4/go.mod h1:DQPxZS994Ld1Y8uwnJT+dRL04XPD0cElP/pHH/zEBHM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -101,7 +98,6 @@ github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GO
 github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
 github.com/go-playground/validator/v10 v10.5.0 h1:X9rflw/KmpACwT8zdrm1upefpvdy6ur8d1kWyq6sg3E=
 github.com/go-playground/validator/v10 v10.5.0/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
 github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
@@ -194,8 +190,6 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
 github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI=
 github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -292,8 +286,8 @@ github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmv
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
-go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mongodb.org/mongo-driver v1.7.1 h1:jwqTeEM3x6L9xDXrCxN0Hbg7vdGfPBOTIkr0+/LYZDA=
+go.mongodb.org/mongo-driver v1.7.1/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -362,9 +356,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
 golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758 h1:aEpZnXcAmXkd6AvLb2OPt+EN1Zu/8Ne3pCqPjja5PXY=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -410,9 +403,8 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M=
 golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe h1:WdX7u8s3yOigWAhHEaDl8r9G+4XwFQEQFtBMYyN+kXQ=
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@@ -423,9 +415,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
"github.com/up9inc/mizu/tap" - "mizuserver/pkg/api" - "mizuserver/pkg/models" - "mizuserver/pkg/routes" - "mizuserver/pkg/sensitiveDataFiltering" - "mizuserver/pkg/utils" - "net/http" - "os" - "os/signal" - "strings" + tapApi "github.com/up9inc/mizu/tap/api" ) var tapperMode = flag.Bool("tap", false, "Run in tapper mode without API") @@ -29,25 +37,29 @@ var namespace = flag.String("namespace", "", "Resolve IPs if they belong to reso var harsReaderMode = flag.Bool("hars-read", false, "Run in hars-read mode") var harsDir = flag.String("hars-dir", "", "Directory to read hars from") +var extensions []*tapApi.Extension // global +var extensionsMap map[string]*tapApi.Extension // global + func main() { flag.Parse() + loadExtensions() hostMode := os.Getenv(shared.HostModeEnvVar) == "1" tapOpts := &tap.TapOpts{HostMode: hostMode} - - if !*tapperMode && !*apiServerMode && !*standaloneMode && !*harsReaderMode{ + if !*tapperMode && !*apiServerMode && !*standaloneMode && !*harsReaderMode { panic("One of the flags --tap, --api or --standalone or --hars-read must be provided") } if *standaloneMode { api.StartResolving(*namespace) - harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts) - filteredHarChannel := make(chan *tap.OutputChannelItem) + outputItemsChannel := make(chan *tapApi.OutputChannelItem) + filteredOutputItemsChannel := make(chan *tapApi.OutputChannelItem) + tap.StartPassiveTapper(tapOpts, outputItemsChannel, extensions) - go filterHarItems(harOutputChannel, filteredHarChannel, getTrafficFilteringOptions()) - go api.StartReadingEntries(filteredHarChannel, nil) - go api.StartReadingOutbound(outboundLinkOutputChannel) + go filterItems(outputItemsChannel, filteredOutputItemsChannel, getTrafficFilteringOptions()) + go api.StartReadingEntries(filteredOutputItemsChannel, nil, extensionsMap) + // go api.StartReadingOutbound(outboundLinkOutputChannel) hostApi(nil) } else if *tapperMode { @@ -61,31 +73,32 @@ func main() { rlog.Infof("Filtering for the following authorities: %v", tap.GetFilterIPs()) } - harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts) - + // harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts) + filteredOutputItemsChannel := make(chan *tapApi.OutputChannelItem) + tap.StartPassiveTapper(tapOpts, filteredOutputItemsChannel, extensions) socketConnection, err := shared.ConnectToSocketServer(*apiServerAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false) if err != nil { panic(fmt.Sprintf("Error connecting to socket server at %s %v", *apiServerAddress, err)) } - go pipeTapChannelToSocket(socketConnection, harOutputChannel) - go pipeOutboundLinksChannelToSocket(socketConnection, outboundLinkOutputChannel) + go pipeTapChannelToSocket(socketConnection, filteredOutputItemsChannel) + // go pipeOutboundLinksChannelToSocket(socketConnection, outboundLinkOutputChannel) } else if *apiServerMode { api.StartResolving(*namespace) - socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000) - filteredHarChannel := make(chan *tap.OutputChannelItem) + outputItemsChannel := make(chan *tapApi.OutputChannelItem) + filteredOutputItemsChannel := make(chan *tapApi.OutputChannelItem) - go filterHarItems(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions()) - go api.StartReadingEntries(filteredHarChannel, nil) + go filterItems(outputItemsChannel, filteredOutputItemsChannel, getTrafficFilteringOptions()) + go api.StartReadingEntries(filteredOutputItemsChannel, nil, extensionsMap) 
 
-	hostApi(socketHarOutChannel)
+	hostApi(outputItemsChannel)
 
 } else if *harsReaderMode {
-	socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
-	filteredHarChannel := make(chan *tap.OutputChannelItem)
+	outputItemsChannel := make(chan *tapApi.OutputChannelItem, 1000)
+	filteredHarChannel := make(chan *tapApi.OutputChannelItem)
 
-	go filterHarItems(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())
-	go api.StartReadingEntries(filteredHarChannel, harsDir)
+	go filterItems(outputItemsChannel, filteredHarChannel, getTrafficFilteringOptions())
+	go api.StartReadingEntries(filteredHarChannel, harsDir, extensionsMap)
 	hostApi(nil)
 }
@@ -96,7 +109,50 @@
 	rlog.Info("Exiting")
 }
 
-func hostApi(socketHarOutputChannel chan<- *tap.OutputChannelItem) {
+func loadExtensions() {
+	dir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
+	extensionsDir := path.Join(dir, "./extensions/")
+
+	files, err := ioutil.ReadDir(extensionsDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+	extensions = make([]*tapApi.Extension, len(files))
+	extensionsMap = make(map[string]*tapApi.Extension)
+	for i, file := range files {
+		filename := file.Name()
+		log.Printf("Loading extension: %s\n", filename)
+		extension := &tapApi.Extension{
+			Path: path.Join(extensionsDir, filename),
+		}
+		plug, _ := plugin.Open(extension.Path)
+		extension.Plug = plug
+		symDissector, err := plug.Lookup("Dissector")
+
+		var dissector tapApi.Dissector
+		var ok bool
+		dissector, ok = symDissector.(tapApi.Dissector)
+		if err != nil || !ok {
+			panic(fmt.Sprintf("Failed to load the extension: %s\n", extension.Path))
+		}
+		dissector.Register(extension)
+		extension.Dissector = dissector
+		extensions[i] = extension
+		extensionsMap[extension.Protocol.Name] = extension
+	}
+
+	sort.Slice(extensions, func(i, j int) bool {
+		return extensions[i].Protocol.Priority < extensions[j].Protocol.Priority
+	})
+
+	for _, extension := range extensions {
+		log.Printf("Extension Properties: %+v\n", extension)
+	}
+
+	controllers.InitExtensionsMap(extensionsMap)
+}
+
+func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) {
 	app := gin.Default()
 
 	app.GET("/echo", func(c *gin.Context) {
@@ -104,7 +160,7 @@
 	})
 
 	eventHandlers := api.RoutesEventHandlers{
-		SocketHarOutChannel: socketHarOutputChannel,
+		SocketOutChannel: socketHarOutputChannel,
 	}
 
 	app.Use(DisableRootStaticCache())
@@ -147,20 +203,34 @@ func CORSMiddleware() gin.HandlerFunc {
 	}
 }
 
+func parseEnvVar(env string) map[string][]string {
+	var mapOfList map[string][]string
+
+	val, present := os.LookupEnv(env)
+
+	if !present {
+		return mapOfList
+	}
+
+	err := json.Unmarshal([]byte(val), &mapOfList)
+	if err != nil {
+		panic(fmt.Sprintf("env var %s's value of %s is invalid! must be map[string][]string %v", env, mapOfList, err))
+	}
+	return mapOfList
+}
+
 func getTapTargets() []string {
 	nodeName := os.Getenv(shared.NodeNameEnvVar)
-	var tappedAddressesPerNodeDict map[string][]string
-	err := json.Unmarshal([]byte(os.Getenv(shared.TappedAddressesPerNodeDictEnvVar)), &tappedAddressesPerNodeDict)
-	if err != nil {
-		panic(fmt.Sprintf("env var %s's value of %s is invalid! must be map[string][]string %v", shared.TappedAddressesPerNodeDictEnvVar, tappedAddressesPerNodeDict, err))
-	}
+	tappedAddressesPerNodeDict := parseEnvVar(shared.TappedAddressesPerNodeDictEnvVar)
 	return tappedAddressesPerNodeDict[nodeName]
 }
 
 func getTrafficFilteringOptions() *shared.TrafficFilteringOptions {
 	filteringOptionsJson := os.Getenv(shared.MizuFilteringOptionsEnvVar)
 	if filteringOptionsJson == "" {
-		return nil
+		return &shared.TrafficFilteringOptions{
+			HealthChecksUserAgentHeaders: []string{},
+		}
 	}
 	var filteringOptions shared.TrafficFilteringOptions
 	err := json.Unmarshal([]byte(filteringOptionsJson), &filteringOptions)
@@ -171,7 +241,7 @@
 	return &filteringOptions
 }
 
-func filterHarItems(inChannel <-chan *tap.OutputChannelItem, outChannel chan *tap.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
+func filterItems(inChannel <-chan *tapApi.OutputChannelItem, outChannel chan *tapApi.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
 	for message := range inChannel {
 		if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) {
 			continue
@@ -181,19 +251,27 @@
 			continue
 		}
 
-		if !filterOptions.DisableRedaction {
-			sensitiveDataFiltering.FilterSensitiveInfoFromHarRequest(message, filterOptions)
-		}
+		// if !filterOptions.DisableRedaction {
+		// 	sensitiveDataFiltering.FilterSensitiveInfoFromHarRequest(message, filterOptions)
+		// }
 
 		outChannel <- message
 	}
 }
 
-func isHealthCheckByUserAgent(message *tap.OutputChannelItem, userAgentsToIgnore []string) bool {
-	for _, header := range message.HarEntry.Request.Headers {
-		if strings.ToLower(header.Name) == "user-agent" {
+func isHealthCheckByUserAgent(item *tapApi.OutputChannelItem, userAgentsToIgnore []string) bool {
+	if item.Protocol.Name != "http" {
+		return false
+	}
+
+	request := item.Pair.Request.Payload.(map[string]interface{})
+	reqDetails := request["details"].(map[string]interface{})
+
+	for _, header := range reqDetails["headers"].([]interface{}) {
+		h := header.(map[string]interface{})
+		if strings.ToLower(h["name"].(string)) == "user-agent" {
 			for _, userAgent := range userAgentsToIgnore {
-				if strings.Contains(strings.ToLower(header.Value), strings.ToLower(userAgent)) {
+				if strings.Contains(strings.ToLower(h["value"].(string)), strings.ToLower(userAgent)) {
 					return true
 				}
 			}
@@ -203,7 +281,7 @@
 	return false
 }
 
-func pipeTapChannelToSocket(connection *websocket.Conn, messageDataChannel <-chan *tap.OutputChannelItem) {
+func pipeTapChannelToSocket(connection *websocket.Conn, messageDataChannel <-chan *tapApi.OutputChannelItem) {
 	if connection == nil {
 		panic("Websocket connection is nil")
 	}
@@ -219,6 +297,8 @@
 			continue
 		}
 
+		// NOTE: This is where the `*tapApi.OutputChannelItem` leaves the code
+		// and goes into the intermediate WebSocket.
 		err = connection.WriteMessage(websocket.TextMessage, marshaledData)
 		if err != nil {
 			rlog.Infof("error sending message through socket server %s, (%v,%+v)\n", err, err, err)
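The `parseEnvVar` helper above generalizes the old inline unmarshalling: node-to-tapped-address mappings (and, until it was removed, `APP_PORTS`) are passed to the agent as a JSON-encoded `map[string][]string`. A small, self-contained illustration of the format it expects (the value shown is an example only):

// Hypothetical illustration of the map[string][]string env var format.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	os.Setenv("APP_PORTS", `{"http": ["8001", "8080"]}`) // example value only

	var mapOfList map[string][]string
	if err := json.Unmarshal([]byte(os.Getenv("APP_PORTS")), &mapOfList); err != nil {
		panic(err)
	}
	fmt.Println(mapOfList["http"]) // [8001 8080]
}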
diff --git a/agent/pkg/api/main.go b/agent/pkg/api/main.go
index 90a411ee2..3bcf5081a 100644
--- a/agent/pkg/api/main.go
+++ b/agent/pkg/api/main.go
@@ -5,8 +5,8 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"mizuserver/pkg/database"
 	"mizuserver/pkg/holder"
-	"mizuserver/pkg/providers"
 	"net/url"
 	"os"
 	"path"
@@ -14,12 +14,13 @@
 	"strings"
 	"time"
 
+	"go.mongodb.org/mongo-driver/bson/primitive"
+
 	"github.com/google/martian/har"
 	"github.com/romana/rlog"
 	"github.com/up9inc/mizu/tap"
-	"go.mongodb.org/mongo-driver/bson/primitive"
+	tapApi "github.com/up9inc/mizu/tap/api"
 
-	"mizuserver/pkg/database"
 	"mizuserver/pkg/models"
 	"mizuserver/pkg/resolver"
 	"mizuserver/pkg/utils"
@@ -49,11 +50,11 @@ func StartResolving(namespace string) {
 	holder.SetResolver(res)
 }
 
-func StartReadingEntries(harChannel <-chan *tap.OutputChannelItem, workingDir *string) {
+func StartReadingEntries(harChannel <-chan *tapApi.OutputChannelItem, workingDir *string, extensionsMap map[string]*tapApi.Extension) {
 	if workingDir != nil && *workingDir != "" {
 		startReadingFiles(*workingDir)
 	} else {
-		startReadingChannel(harChannel)
+		startReadingChannel(harChannel, extensionsMap)
 	}
 }
 
@@ -87,30 +88,36 @@ func startReadingFiles(workingDir string) {
 		decErr := json.NewDecoder(bufio.NewReader(file)).Decode(&inputHar)
 		utils.CheckErr(decErr)
 
-		for _, entry := range inputHar.Log.Entries {
-			time.Sleep(time.Millisecond * 250)
-			connectionInfo := &tap.ConnectionInfo{
-				ClientIP:   fileInfo.Name(),
-				ClientPort: "",
-				ServerIP:   "",
-				ServerPort: "",
-				IsOutgoing: false,
-			}
-			saveHarToDb(entry, connectionInfo)
-		}
+		// for _, entry := range inputHar.Log.Entries {
+		// 	time.Sleep(time.Millisecond * 250)
+		// 	// connectionInfo := &tap.ConnectionInfo{
+		// 	// 	ClientIP:   fileInfo.Name(),
+		// 	// 	ClientPort: "",
+		// 	// 	ServerIP:   "",
+		// 	// 	ServerPort: "",
+		// 	// 	IsOutgoing: false,
+		// 	// }
+		// 	// saveHarToDb(entry, connectionInfo)
+		// }
 		rmErr := os.Remove(inputFilePath)
 		utils.CheckErr(rmErr)
 	}
 }
 
-func startReadingChannel(outputItems <-chan *tap.OutputChannelItem) {
+func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extensionsMap map[string]*tapApi.Extension) {
 	if outputItems == nil {
 		panic("Channel of captured messages is nil")
 	}
 
 	for item := range outputItems {
-		providers.EntryAdded()
-		saveHarToDb(item.HarEntry, item.ConnectionInfo)
+		extension := extensionsMap[item.Protocol.Name]
+		resolvedSource, resolvedDestionation := resolveIP(item.ConnectionInfo)
+		mizuEntry := extension.Dissector.Analyze(item, primitive.NewObjectID().Hex(), resolvedSource, resolvedDestionation)
+		baseEntry := extension.Dissector.Summarize(mizuEntry)
+		mizuEntry.EstimatedSizeBytes = getEstimatedEntrySizeBytes(mizuEntry)
+		database.CreateEntry(mizuEntry)
+		baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(baseEntry)
+		BroadcastToBrowserClients(baseEntryBytes)
 	}
 }
 
@@ -121,14 +128,7 @@ func StartReadingOutbound(outboundLinkChannel <-chan *tap.OutboundLink) {
 	}
 }
 
-func saveHarToDb(entry *har.Entry, connectionInfo *tap.ConnectionInfo) {
-	entryBytes, _ := json.Marshal(entry)
-	serviceName, urlPath := getServiceNameFromUrl(entry.Request.URL)
-	entryId := primitive.NewObjectID().Hex()
-	var (
-		resolvedSource      string
-		resolvedDestination string
-	)
+func resolveIP(connectionInfo *tapApi.ConnectionInfo) (resolvedSource string, resolvedDestination string) {
 	if k8sResolver != nil {
 		unresolvedSource := connectionInfo.ClientIP
 		resolvedSource = k8sResolver.Resolve(unresolvedSource)
@@ -147,32 +147,7 @@
 		}
 	}
-
-	mizuEntry := models.MizuEntry{
-		EntryId:             entryId,
-		Entry:               string(entryBytes), // simple way to store it and not convert to bytes
-		Service:             serviceName,
-		Url:                 entry.Request.URL,
-		Path:                urlPath,
-		Method:              entry.Request.Method,
-		Status:              entry.Response.Status,
-		RequestSenderIp:     connectionInfo.ClientIP,
-		Timestamp:           entry.StartedDateTime.UnixNano() / int64(time.Millisecond),
-		ResolvedSource:      resolvedSource,
-		ResolvedDestination: resolvedDestination,
-		IsOutgoing:          connectionInfo.IsOutgoing,
-	}
-	mizuEntry.EstimatedSizeBytes = getEstimatedEntrySizeBytes(mizuEntry)
-	database.CreateEntry(&mizuEntry)
-
-	baseEntry := models.BaseEntryDetails{}
-	if err := models.GetEntry(&mizuEntry, &baseEntry); err != nil {
-		return
-	}
-	baseEntry.Rules = models.RunValidationRulesState(*entry, serviceName)
-	baseEntry.Latency = entry.Timings.Receive
-	baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(&baseEntry)
-	BroadcastToBrowserClients(baseEntryBytes)
+	return resolvedSource, resolvedDestination
 }
 
 func getServiceNameFromUrl(inputUrl string) (string, string) {
@@ -182,11 +157,14 @@
 }
 
 func CheckIsServiceIP(address string) bool {
+	if k8sResolver == nil {
+		return false
+	}
 	return k8sResolver.CheckIsServiceIP(address)
 }
 
 // gives a rough estimate of the size this will take up in the db, good enough for maintaining db size limit accurately
-func getEstimatedEntrySizeBytes(mizuEntry models.MizuEntry) int {
+func getEstimatedEntrySizeBytes(mizuEntry *tapApi.MizuEntry) int {
 	sizeBytes := len(mizuEntry.Entry)
 	sizeBytes += len(mizuEntry.EntryId)
 	sizeBytes += len(mizuEntry.Service)
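`startReadingChannel` above is the consuming side of the extension API: for every captured item it looks up the matching extension and calls `Analyze`, then `Summarize`. For completeness, here is a hypothetical skeleton of the producing side - what a protocol extension's `main.go` might export so that the agent's `plug.Lookup("Dissector")` succeeds. The method bodies and the `Dissect` signature are placeholders; the real implementations live in `tap/extensions/{http,amqp,kafka}`:

// Hypothetical skeleton of a protocol extension, built with:
//   go build -buildmode=plugin -o example.so .
package main

import (
	"bufio"

	tapApi "github.com/up9inc/mizu/tap/api"
)

type dissecting struct{}

func (d dissecting) Register(extension *tapApi.Extension) {
	extension.Protocol = tapApi.Protocol{Name: "example", Priority: 3}
}

func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *tapApi.TcpID, emitter tapApi.Emitter) error {
	// Read from b, match requests to responses, then emitter.Emit(...) each pair.
	return nil
}

func (d dissecting) Analyze(item *tapApi.OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *tapApi.MizuEntry {
	return &tapApi.MizuEntry{} // build the DB row for this item
}

func (d dissecting) Summarize(entry *tapApi.MizuEntry) *tapApi.BaseEntryDetails {
	return &tapApi.BaseEntryDetails{} // the summary the UI lists
}

func (d dissecting) Represent(entry *tapApi.MizuEntry) (tapApi.Protocol, []byte, error) {
	return tapApi.Protocol{}, []byte(`[]`), nil // drives the details layout
}

// Dissector is the symbol the agent looks up in every plugin.
var Dissector dissecting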
diff --git a/agent/pkg/api/socket_server_handlers.go b/agent/pkg/api/socket_server_handlers.go
index 0e6a9be57..3dada8cf9 100644
--- a/agent/pkg/api/socket_server_handlers.go
+++ b/agent/pkg/api/socket_server_handlers.go
@@ -8,9 +8,10 @@ import (
 	"mizuserver/pkg/up9"
 	"sync"
 
+	tapApi "github.com/up9inc/mizu/tap/api"
+
 	"github.com/romana/rlog"
 	"github.com/up9inc/mizu/shared"
-	"github.com/up9inc/mizu/tap"
 )
 
 var browserClientSocketUUIDs = make([]int, 0)
@@ -18,7 +19,7 @@
 var socketListLock = sync.Mutex{}
 
 type RoutesEventHandlers struct {
 	EventHandlers
-	SocketHarOutChannel chan<- *tap.OutputChannelItem
+	SocketOutChannel chan<- *tapApi.OutputChannelItem
 }
 
 func init() {
@@ -73,7 +74,8 @@ func (h *RoutesEventHandlers) WebSocketMessage(_ int, message []byte) {
 			if err != nil {
 				rlog.Infof("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
 			} else {
-				h.SocketHarOutChannel <- tappedEntryMessage.Data
+				// NOTE: This is where the message comes back from the intermediate WebSocket to code.
+				h.SocketOutChannel <- tappedEntryMessage.Data
 			}
 		case shared.WebSocketMessageTypeUpdateStatus:
 			var statusMessage shared.WebSocketStatusMessage
diff --git a/agent/pkg/controllers/entries_controller.go b/agent/pkg/controllers/entries_controller.go
index 99bb8d8fb..0b4dfa702 100644
--- a/agent/pkg/controllers/entries_controller.go
+++ b/agent/pkg/controllers/entries_controller.go
@@ -16,8 +16,16 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/google/martian/har"
 	"github.com/romana/rlog"
+
+	tapApi "github.com/up9inc/mizu/tap/api"
 )
 
+var extensionsMap map[string]*tapApi.Extension // global
+
+func InitExtensionsMap(ref map[string]*tapApi.Extension) {
+	extensionsMap = ref
+}
+
 func GetEntries(c *gin.Context) {
 	entriesFilter := &models.EntriesFilter{}
 
@@ -31,7 +39,7 @@
 	order := database.OperatorToOrderMapping[entriesFilter.Operator]
 	operatorSymbol := database.OperatorToSymbolMapping[entriesFilter.Operator]
 
-	var entries []models.MizuEntry
+	var entries []tapApi.MizuEntry
 	database.GetEntriesTable().
 		Order(fmt.Sprintf("timestamp %s", order)).
 		Where(fmt.Sprintf("timestamp %s %v", operatorSymbol, entriesFilter.Timestamp)).
@@ -44,9 +52,9 @@
 		utils.ReverseSlice(entries)
 	}
 
-	baseEntries := make([]models.BaseEntryDetails, 0)
+	baseEntries := make([]tapApi.BaseEntryDetails, 0)
 	for _, data := range entries {
-		harEntry := models.BaseEntryDetails{}
+		harEntry := tapApi.BaseEntryDetails{}
 		if err := models.GetEntry(&data, &harEntry); err != nil {
 			continue
 		}
@@ -80,7 +88,7 @@ func GetHARs(c *gin.Context) {
 		timestampTo = entriesFilter.To
 	}
 
-	var entries []models.MizuEntry
+	var entries []tapApi.MizuEntry
 	database.GetEntriesTable().
 		Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
 		Order(fmt.Sprintf("timestamp %s", order)).
@@ -207,7 +215,7 @@ func GetFullEntries(c *gin.Context) {
 }
 
 func GetEntry(c *gin.Context) {
-	var entryData models.MizuEntry
+	var entryData tapApi.MizuEntry
 	database.GetEntriesTable().
 		Where(map[string]string{"entryId": c.Param("entryId")}).
 		First(&entryData)
@@ -219,20 +227,28 @@
 			"msg":   "Can't get entry details",
 		})
 	}
-	fullEntryWithPolicy := models.FullEntryWithPolicy{}
-	if err := models.GetEntry(&entryData, &fullEntryWithPolicy); err != nil {
-		c.JSON(http.StatusInternalServerError, map[string]interface{}{
-			"error": true,
-			"msg":   "Can't get entry details",
-		})
-	}
-	c.JSON(http.StatusOK, fullEntryWithPolicy)
+
+	// FIXME: Fix the part below
+	// fullEntryWithPolicy := models.FullEntryWithPolicy{}
+	// if err := models.GetEntry(&entryData, &fullEntryWithPolicy); err != nil {
+	// 	c.JSON(http.StatusInternalServerError, map[string]interface{}{
+	// 		"error": true,
+	// 		"msg":   "Can't get entry details",
+	// 	})
+	// }
+	extension := extensionsMap[entryData.ProtocolName]
+	protocol, representation, _ := extension.Dissector.Represent(&entryData)
+	c.JSON(http.StatusOK, tapApi.MizuEntryWrapper{
+		Protocol:       protocol,
+		Representation: string(representation),
+		Data:           entryData,
+	})
 }
 
 func DeleteAllEntries(c *gin.Context) {
 	database.GetEntriesTable().
 		Where("1 = 1").
-		Delete(&models.MizuEntry{})
+		Delete(&tapApi.MizuEntry{})
 
 	c.JSON(http.StatusOK, map[string]string{
 		"msg": "Success",
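Note that `GetEntry` now returns a `tapApi.MizuEntryWrapper` whose representation field is itself a JSON document produced by `Represent`, so a client decodes in two steps. A hypothetical Go client of the entry endpoint (the field names follow the wrapper above; the lowercase JSON tags and the sample payload are assumptions, the real tags live in `tap/api/api.go`):

// Hypothetical two-step decode of the GetEntry response body.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	body := []byte(`{"protocol":{"name":"http"},"representation":"[{\"type\":\"table\"}]","data":{}}`) // sample payload

	var wrapper struct {
		Protocol struct {
			Name string `json:"name"`
		} `json:"protocol"`
		Representation string `json:"representation"`
	}
	if err := json.Unmarshal(body, &wrapper); err != nil {
		panic(err)
	}

	// The representation string is JSON too; it drives the details layout.
	var sections []map[string]interface{}
	if err := json.Unmarshal([]byte(wrapper.Representation), &sections); err != nil {
		panic(err)
	}
	fmt.Println(wrapper.Protocol.Name, sections)
}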
diff --git a/agent/pkg/database/main.go b/agent/pkg/database/main.go
index c3b1d7847..f6dfe402e 100644
--- a/agent/pkg/database/main.go
+++ b/agent/pkg/database/main.go
@@ -2,16 +2,18 @@ package database
 
 import (
 	"fmt"
+	"mizuserver/pkg/utils"
+	"time"
+
 	"gorm.io/driver/sqlite"
 	"gorm.io/gorm"
 	"gorm.io/gorm/logger"
-	"mizuserver/pkg/models"
-	"mizuserver/pkg/utils"
-	"time"
+
+	tapApi "github.com/up9inc/mizu/tap/api"
 )
 
 const (
-	DBPath = "./entries.db"
+	DBPath    = "./entries.db"
 	OrderDesc = "desc"
 	OrderAsc  = "asc"
 	LT        = "lt"
@@ -19,8 +21,8 @@
 )
 
 var (
-	DB *gorm.DB
-	IsDBLocked = false
+	DB         *gorm.DB
+	IsDBLocked = false
 	OperatorToSymbolMapping = map[string]string{
 		LT: "<",
 		GT: ">",
@@ -40,7 +42,7 @@ func GetEntriesTable() *gorm.DB {
 	return DB.Table("mizu_entries")
 }
 
-func CreateEntry(entry *models.MizuEntry) {
+func CreateEntry(entry *tapApi.MizuEntry) {
 	if IsDBLocked {
 		return
 	}
@@ -51,14 +53,13 @@ func initDataBase(databasePath string) *gorm.DB {
 	temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{
 		Logger: &utils.TruncatingLogger{LogLevel: logger.Warn, SlowThreshold: 500 * time.Millisecond},
 	})
-	_ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
+	_ = temp.AutoMigrate(&tapApi.MizuEntry{}) // this will ensure table is created
 	return temp
 }
 
-
-func GetEntriesFromDb(timestampFrom int64, timestampTo int64) []models.MizuEntry {
+func GetEntriesFromDb(timestampFrom int64, timestampTo int64) []tapApi.MizuEntry {
 	order := OrderDesc
-	var entries []models.MizuEntry
+	var entries []tapApi.MizuEntry
 	GetEntriesTable().
 		Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
 		Order(fmt.Sprintf("timestamp %s", order)).
@@ -70,4 +71,3 @@
 	}
 	return entries
 }
-
diff --git a/agent/pkg/database/size_enforcer.go b/agent/pkg/database/size_enforcer.go
index c17c53d97..b28c0ff6b 100644
--- a/agent/pkg/database/size_enforcer.go
+++ b/agent/pkg/database/size_enforcer.go
@@ -1,16 +1,17 @@
 package database
 
 import (
+	"log"
+	"os"
+	"strconv"
+	"time"
+
 	"github.com/fsnotify/fsnotify"
 	"github.com/romana/rlog"
 	"github.com/up9inc/mizu/shared"
 	"github.com/up9inc/mizu/shared/debounce"
 	"github.com/up9inc/mizu/shared/units"
-	"log"
-	"mizuserver/pkg/models"
-	"os"
-	"strconv"
-	"time"
+	tapApi "github.com/up9inc/mizu/tap/api"
 )
 
 const percentageOfMaxSizeBytesToPrune = 15
@@ -99,7 +100,7 @@ func pruneOldEntries(currentFileSize int64) {
 		if bytesToBeRemoved >= amountOfBytesToTrim {
 			break
 		}
-		var entry models.MizuEntry
+		var entry tapApi.MizuEntry
 		err = DB.ScanRows(rows, &entry)
 		if err != nil {
 			rlog.Errorf("Error scanning db row: %v", err)
@@ -111,7 +112,7 @@
 	}
 
 	if len(entryIdsToRemove) > 0 {
-		GetEntriesTable().Where(entryIdsToRemove).Delete(models.MizuEntry{})
+		GetEntriesTable().Where(entryIdsToRemove).Delete(tapApi.MizuEntry{})
 		// VACUUM causes sqlite to shrink the db file after rows have been deleted, the db file will not shrink without this
 		DB.Exec("VACUUM")
 		rlog.Errorf("Removed %d rows and cleared %s", len(entryIdsToRemove), units.BytesToHumanReadable(bytesToBeRemoved))
diff --git a/agent/pkg/models/models.go b/agent/pkg/models/models.go
index bf8c0cb7b..8a92fc64a 100644
--- a/agent/pkg/models/models.go
+++ b/agent/pkg/models/models.go
@@ -3,64 +3,22 @@ package models
 
 import (
 	"encoding/json"
+
+	tapApi "github.com/up9inc/mizu/tap/api"
+
 	"mizuserver/pkg/rules"
 	"mizuserver/pkg/utils"
-	"time"
 
 	"github.com/google/martian/har"
 	"github.com/up9inc/mizu/shared"
 	"github.com/up9inc/mizu/tap"
 )
 
-type DataUnmarshaler interface {
-	UnmarshalData(*MizuEntry) error
-}
-
-func GetEntry(r *MizuEntry, v DataUnmarshaler) error {
+func GetEntry(r *tapApi.MizuEntry, v tapApi.DataUnmarshaler) error {
 	return v.UnmarshalData(r)
 }
 
-type MizuEntry struct {
-	ID                  uint `gorm:"primarykey"`
-	CreatedAt           time.Time
-	UpdatedAt           time.Time
-	Entry               string `json:"entry,omitempty" gorm:"column:entry"`
-	EntryId             string `json:"entryId" gorm:"column:entryId"`
-	Url                 string `json:"url" gorm:"column:url"`
-	Method              string `json:"method" gorm:"column:method"`
-	Status              int    `json:"status" gorm:"column:status"`
-	RequestSenderIp     string `json:"requestSenderIp" gorm:"column:requestSenderIp"`
-	Service             string `json:"service" gorm:"column:service"`
-	Timestamp           int64  `json:"timestamp" gorm:"column:timestamp"`
-	Path                string `json:"path" gorm:"column:path"`
-	ResolvedSource      string `json:"resolvedSource,omitempty" gorm:"column:resolvedSource"`
-	ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
-	IsOutgoing          bool   `json:"isOutgoing,omitempty" gorm:"column:isOutgoing"`
-	EstimatedSizeBytes  int    `json:"-" gorm:"column:estimatedSizeBytes"`
-}
-
-type BaseEntryDetails struct {
-	Id              string          `json:"id,omitempty"`
-	Url             string          `json:"url,omitempty"`
-	RequestSenderIp string          `json:"requestSenderIp,omitempty"`
-	Service         string          `json:"service,omitempty"`
-	Path            string          `json:"path,omitempty"`
-	StatusCode      int             `json:"statusCode,omitempty"`
-	Method          string          `json:"method,omitempty"`
-	Timestamp       int64           `json:"timestamp,omitempty"`
-	IsOutgoing      bool            `json:"isOutgoing,omitempty"`
-	Latency         int64           `json:"latency,omitempty"`
-	Rules           ApplicableRules `json:"rules,omitempty"`
-}
-
-type ApplicableRules struct {
-	Latency       int64 `json:"latency,omitempty"`
-	Status        bool  `json:"status,omitempty"`
-	NumberOfRules int   `json:"numberOfRules,omitempty"`
-}
-
-func NewApplicableRules(status bool, latency int64, number int) ApplicableRules {
-	ar := ApplicableRules{}
+func NewApplicableRules(status bool, latency int64, number int) tapApi.ApplicableRules {
+	ar := tapApi.ApplicableRules{}
 	ar.Status = status
 	ar.Latency = latency
 	ar.NumberOfRules = number
@@ -75,26 +33,7 @@ type FullEntryDetailsExtra struct {
 	har.Entry
 }
 
-func (bed *BaseEntryDetails) UnmarshalData(entry *MizuEntry) error {
-	entryUrl := entry.Url
-	service := entry.Service
-	if entry.ResolvedDestination != "" {
-		entryUrl = utils.SetHostname(entryUrl, entry.ResolvedDestination)
-		service = utils.SetHostname(service, entry.ResolvedDestination)
-	}
-	bed.Id = entry.EntryId
-	bed.Url = entryUrl
-	bed.Service = service
-	bed.Path = entry.Path
-	bed.StatusCode = entry.Status
-	bed.Method = entry.Method
-	bed.Timestamp = entry.Timestamp
-	bed.RequestSenderIp = entry.RequestSenderIp
-	bed.IsOutgoing = entry.IsOutgoing
-	return nil
-}
-
-func (fed *FullEntryDetails) UnmarshalData(entry *MizuEntry) error {
+func (fed *FullEntryDetails) UnmarshalData(entry *tapApi.MizuEntry) error {
 	if err := json.Unmarshal([]byte(entry.Entry), &fed.Entry); err != nil {
 		return err
 	}
@@ -105,7 +44,7 @@
 	return nil
 }
 
-func (fedex *FullEntryDetailsExtra) UnmarshalData(entry *MizuEntry) error {
+func (fedex *FullEntryDetailsExtra) UnmarshalData(entry *tapApi.MizuEntry) error {
 	if err := json.Unmarshal([]byte(entry.Entry), &fedex.Entry); err != nil {
 		return err
 	}
@@ -138,12 +77,12 @@ type HarFetchRequestQuery struct {
 
 type WebSocketEntryMessage struct {
 	*shared.WebSocketMessageMetadata
-	Data *BaseEntryDetails `json:"data,omitempty"`
+	Data *tapApi.BaseEntryDetails `json:"data,omitempty"`
 }
 
 type WebSocketTappedEntryMessage struct {
 	*shared.WebSocketMessageMetadata
-	Data *tap.OutputChannelItem
+	Data *tapApi.OutputChannelItem
 }
 
 type WebsocketOutboundLinkMessage struct {
@@ -151,7 +90,7 @@
 	Data *tap.OutboundLink
 }
 
-func CreateBaseEntryWebSocketMessage(base *BaseEntryDetails) ([]byte, error) {
+func CreateBaseEntryWebSocketMessage(base *tapApi.BaseEntryDetails) ([]byte, error) {
 	message := &WebSocketEntryMessage{
 		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
 			MessageType: shared.WebSocketMessageTypeEntry,
@@ -161,7 +100,7 @@
 	}
 	return json.Marshal(message)
 }
 
-func CreateWebsocketTappedEntryMessage(base *tap.OutputChannelItem) ([]byte, error) {
+func CreateWebsocketTappedEntryMessage(base *tapApi.OutputChannelItem) ([]byte, error) {
 	message := &WebSocketTappedEntryMessage{
 		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
 			MessageType: shared.WebSocketMessageTypeTappedEntry,
@@ -207,7 +146,7 @@ type FullEntryWithPolicy struct {
 	Service string       `json:"service"`
 }
 
-func (fewp *FullEntryWithPolicy) UnmarshalData(entry *MizuEntry) error {
+func (fewp *FullEntryWithPolicy) UnmarshalData(entry *tapApi.MizuEntry) error {
 	if err := json.Unmarshal([]byte(entry.Entry), &fewp.Entry); err != nil {
 		return err
 	}
@@ -218,7 +157,7 @@
 	return nil
 }
 
-func RunValidationRulesState(harEntry har.Entry, service string) ApplicableRules {
+func RunValidationRulesState(harEntry har.Entry, service string) tapApi.ApplicableRules {
 	numberOfRules, resultPolicyToSend := rules.MatchRequestPolicy(harEntry, service)
 	statusPolicyToSend, latency, numberOfRules := rules.PassedValidationRules(resultPolicyToSend, numberOfRules)
 	ar := NewApplicableRules(statusPolicyToSend, latency, numberOfRules)
RunValidationRulesState(harEntry har.Entry, service string) tapApi.ApplicableRules { numberOfRules, resultPolicyToSend := rules.MatchRequestPolicy(harEntry, service) statusPolicyToSend, latency, numberOfRules := rules.PassedValidationRules(resultPolicyToSend, numberOfRules) ar := NewApplicableRules(statusPolicyToSend, latency, numberOfRules) diff --git a/agent/pkg/resolver/resolver.go b/agent/pkg/resolver/resolver.go index d68896acb..f29791198 100644 --- a/agent/pkg/resolver/resolver.go +++ b/agent/pkg/resolver/resolver.go @@ -4,10 +4,11 @@ import ( "context" "errors" "fmt" + "github.com/romana/rlog" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "github.com/orcaman/concurrent-map" + cmap "github.com/orcaman/concurrent-map" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" diff --git a/agent/pkg/sensitiveDataFiltering/messageSensitiveDataCleaner.go b/agent/pkg/sensitiveDataFiltering/messageSensitiveDataCleaner.go deleted file mode 100644 index 4a4889147..000000000 --- a/agent/pkg/sensitiveDataFiltering/messageSensitiveDataCleaner.go +++ /dev/null @@ -1,200 +0,0 @@ -package sensitiveDataFiltering - -import ( - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "github.com/up9inc/mizu/tap" - "net/url" - "strings" - - "github.com/beevik/etree" - "github.com/google/martian/har" - "github.com/up9inc/mizu/shared" -) - -func FilterSensitiveInfoFromHarRequest(harOutputItem *tap.OutputChannelItem, options *shared.TrafficFilteringOptions) { - harOutputItem.HarEntry.Request.Headers = filterHarHeaders(harOutputItem.HarEntry.Request.Headers) - harOutputItem.HarEntry.Response.Headers = filterHarHeaders(harOutputItem.HarEntry.Response.Headers) - - harOutputItem.HarEntry.Request.Cookies = make([]har.Cookie, 0, 0) - harOutputItem.HarEntry.Response.Cookies = make([]har.Cookie, 0, 0) - - harOutputItem.HarEntry.Request.URL = filterUrl(harOutputItem.HarEntry.Request.URL) - for i, queryString := range harOutputItem.HarEntry.Request.QueryString { - if isFieldNameSensitive(queryString.Name) { - harOutputItem.HarEntry.Request.QueryString[i].Value = maskedFieldPlaceholderValue - } - } - - if harOutputItem.HarEntry.Request.PostData != nil { - requestContentType := getContentTypeHeaderValue(harOutputItem.HarEntry.Request.Headers) - filteredRequestBody, err := filterHttpBody([]byte(harOutputItem.HarEntry.Request.PostData.Text), requestContentType, options) - if err == nil { - harOutputItem.HarEntry.Request.PostData.Text = string(filteredRequestBody) - } - } - if harOutputItem.HarEntry.Response.Content != nil { - responseContentType := getContentTypeHeaderValue(harOutputItem.HarEntry.Response.Headers) - filteredResponseBody, err := filterHttpBody(harOutputItem.HarEntry.Response.Content.Text, responseContentType, options) - if err == nil { - harOutputItem.HarEntry.Response.Content.Text = filteredResponseBody - } - } -} - -func filterHarHeaders(headers []har.Header) []har.Header { - newHeaders := make([]har.Header, 0) - for i, header := range headers { - if strings.ToLower(header.Name) == "cookie" { - continue - } else if isFieldNameSensitive(header.Name) { - newHeaders = append(newHeaders, har.Header{Name: header.Name, Value: maskedFieldPlaceholderValue}) - headers[i].Value = maskedFieldPlaceholderValue - } else { - newHeaders = append(newHeaders, header) - } - } - return newHeaders -} - -func getContentTypeHeaderValue(headers []har.Header) string { - for _, header := range headers { - if strings.ToLower(header.Name) == "content-type" { - return header.Value - 
} - } - return "" -} - -func isFieldNameSensitive(fieldName string) bool { - name := strings.ToLower(fieldName) - name = strings.ReplaceAll(name, "_", "") - name = strings.ReplaceAll(name, "-", "") - name = strings.ReplaceAll(name, " ", "") - - for _, sensitiveField := range personallyIdentifiableDataFields { - if strings.Contains(name, sensitiveField) { - return true - } - } - - return false -} - -func filterHttpBody(bytes []byte, contentType string, options *shared.TrafficFilteringOptions) ([]byte, error) { - mimeType := strings.Split(contentType, ";")[0] - switch strings.ToLower(mimeType) { - case "application/json": - return filterJsonBody(bytes) - case "text/html": - fallthrough - case "application/xhtml+xml": - fallthrough - case "text/xml": - fallthrough - case "application/xml": - return filterXmlEtree(bytes) - case "text/plain": - if options != nil && options.PlainTextMaskingRegexes != nil { - return filterPlainText(bytes, options), nil - } - } - return bytes, nil -} - -func filterPlainText(bytes []byte, options *shared.TrafficFilteringOptions) []byte { - for _, regex := range options.PlainTextMaskingRegexes { - bytes = regex.ReplaceAll(bytes, []byte(maskedFieldPlaceholderValue)) - } - return bytes -} - -func filterXmlEtree(bytes []byte) ([]byte, error) { - if !IsValidXML(bytes) { - return nil, errors.New("Invalid XML") - } - xmlDoc := etree.NewDocument() - err := xmlDoc.ReadFromBytes(bytes) - if err != nil { - return nil, err - } else { - filterXmlElement(xmlDoc.Root()) - } - return xmlDoc.WriteToBytes() -} - -func IsValidXML(data []byte) bool { - return xml.Unmarshal(data, new(interface{})) == nil -} - -func filterXmlElement(element *etree.Element) { - for i, attribute := range element.Attr { - if isFieldNameSensitive(attribute.Key) { - element.Attr[i].Value = maskedFieldPlaceholderValue - } - } - if element.ChildElements() == nil || len(element.ChildElements()) == 0 { - if isFieldNameSensitive(element.Tag) { - element.SetText(maskedFieldPlaceholderValue) - } - } else { - for _, element := range element.ChildElements() { - filterXmlElement(element) - } - } -} - -func filterJsonBody(bytes []byte) ([]byte, error) { - var bodyJsonMap map[string] interface{} - err := json.Unmarshal(bytes ,&bodyJsonMap) - if err != nil { - return nil, err - } - filterJsonMap(bodyJsonMap) - return json.Marshal(bodyJsonMap) -} - -func filterJsonMap(jsonMap map[string] interface{}) { - for key, value := range jsonMap { - // Do not replace nil values with maskedFieldPlaceholderValue - if value == nil { - continue - } - - nestedMap, isNested := value.(map[string] interface{}) - if isNested { - filterJsonMap(nestedMap) - } else { - if isFieldNameSensitive(key) { - jsonMap[key] = maskedFieldPlaceholderValue - } - } - } -} - -// receives string representing url, returns string url without sensitive query param values (http://service/api?userId=bob&password=123&type=login -> http://service/api?userId=[REDACTED]&password=[REDACTED]&type=login) -func filterUrl(originalUrl string) string { - parsedUrl, err := url.Parse(originalUrl) - if err != nil { - return fmt.Sprintf("http://%s", maskedFieldPlaceholderValue) - } else { - if len(parsedUrl.RawQuery) > 0 { - newQueryArgs := make([]string, 0) - for urlQueryParamName, urlQueryParamValues := range parsedUrl.Query() { - newValues := urlQueryParamValues - if isFieldNameSensitive(urlQueryParamName) { - newValues = []string {maskedFieldPlaceholderValue} - } - for _, paramValue := range newValues { - newQueryArgs = append(newQueryArgs, fmt.Sprintf("%s=%s", 
urlQueryParamName, paramValue)) - } - } - - parsedUrl.RawQuery = strings.Join(newQueryArgs, "&") - } - - return parsedUrl.String() - } -} diff --git a/build_extensions.sh b/build_extensions.sh new file mode 100755 index 000000000..855f1c44a --- /dev/null +++ b/build_extensions.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +for f in tap/extensions/*; do + if [ -d "$f" ]; then + extension=$(basename $f) && \ + cd tap/extensions/${extension} && \ + go build -buildmode=plugin -o ../${extension}.so . && \ + cd ../../.. && \ + mkdir -p agent/build/extensions && \ + cp tap/extensions/${extension}.so agent/build/extensions + fi +done diff --git a/cli/cmd/common.go b/cli/cmd/common.go index 1f09c6ace..80e91c21a 100644 --- a/cli/cmd/common.go +++ b/cli/cmd/common.go @@ -3,6 +3,13 @@ package cmd import ( "context" "fmt" + "log" + "os" + "os/exec" + "os/signal" + "runtime" + "syscall" + "github.com/up9inc/mizu/cli/config" "github.com/up9inc/mizu/cli/config/configStructs" "github.com/up9inc/mizu/cli/errormessage" @@ -10,9 +17,6 @@ import ( "github.com/up9inc/mizu/cli/logger" "github.com/up9inc/mizu/cli/mizu" "github.com/up9inc/mizu/cli/uiUtils" - "os" - "os/signal" - "syscall" ) func GetApiServerUrl() string { @@ -45,3 +49,22 @@ func waitForFinish(ctx context.Context, cancel context.CancelFunc) { cancel() } } + +func openBrowser(url string) { + var err error + + switch runtime.GOOS { + case "linux": + err = exec.Command("xdg-open", url).Start() + case "windows": + err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() + case "darwin": + err = exec.Command("open", url).Start() + default: + err = fmt.Errorf("unsupported platform") + } + if err != nil { + log.Fatal(err) + } + +} diff --git a/cli/cmd/tapRunner.go b/cli/cmd/tapRunner.go index b1ce78cb8..ee2777c65 100644 --- a/cli/cmd/tapRunner.go +++ b/cli/cmd/tapRunner.go @@ -3,6 +3,11 @@ package cmd import ( "context" "fmt" + "path" + "regexp" + "strings" + "time" + "github.com/up9inc/mizu/cli/apiserver" "github.com/up9inc/mizu/cli/config" "github.com/up9inc/mizu/cli/config/configStructs" @@ -19,10 +24,6 @@ import ( yaml "gopkg.in/yaml.v3" core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" - "path" - "regexp" - "strings" - "time" ) const ( @@ -237,7 +238,6 @@ func updateMizuTappers(ctx context.Context, kubernetesProvider *kubernetes.Provi fmt.Sprintf("%s.%s.svc.cluster.local", state.apiServerService.Name, state.apiServerService.Namespace), nodeToTappedPodIPMap, serviceAccountName, - config.Config.Tap.TapOutgoing(), config.Config.Tap.TapperResources, config.Config.ImagePullPolicy(), ); err != nil { @@ -497,12 +497,14 @@ func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provi isPodReady = true go startProxyReportErrorIfAny(kubernetesProvider, cancel) - if err := apiserver.Provider.InitAndTestConnection(GetApiServerUrl()); err != nil { + url := GetApiServerUrl() + if err := apiserver.Provider.InitAndTestConnection(url); err != nil { logger.Log.Errorf(uiUtils.Error, "Couldn't connect to API server, check logs") cancel() break } - logger.Log.Infof("Mizu is available at %s\n", GetApiServerUrl()) + logger.Log.Infof("Mizu is available at %s\n", url) + openBrowser(url) requestForAnalysisIfNeeded() if err := apiserver.Provider.ReportTappedPods(state.currentlyTappedPods); err != nil { logger.Log.Debugf("[Error] failed update tapped pods %v", err) diff --git a/cli/cmd/viewRunner.go b/cli/cmd/viewRunner.go index 69bd2a27c..3fc50161b 100644 --- a/cli/cmd/viewRunner.go +++ b/cli/cmd/viewRunner.go @@ -3,6 +3,8 @@ package cmd 
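The openBrowser helper added above shells out per platform and calls log.Fatal on failure, which terminates the CLI even when the API server itself is healthy. One possible variant, sketched here, returns the error instead so the tap/view runners could log it and continue:

    package main

    import (
        "fmt"
        "os/exec"
        "runtime"
    )

    // openBrowser launches the default browser for the current platform.
    // Returning the error keeps a missing browser from being fatal.
    func openBrowser(url string) error {
        switch runtime.GOOS {
        case "linux":
            return exec.Command("xdg-open", url).Start()
        case "windows":
            return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
        case "darwin":
            return exec.Command("open", url).Start()
        default:
            return fmt.Errorf("unsupported platform: %s", runtime.GOOS)
        }
    }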
import ( "context" "fmt" + "net/http" + "github.com/up9inc/mizu/cli/apiserver" "github.com/up9inc/mizu/cli/config" "github.com/up9inc/mizu/cli/kubernetes" @@ -10,7 +12,6 @@ import ( "github.com/up9inc/mizu/cli/mizu" "github.com/up9inc/mizu/cli/mizu/version" "github.com/up9inc/mizu/cli/uiUtils" - "net/http" ) func runMizuView() { @@ -35,7 +36,9 @@ func runMizuView() { return } - response, err := http.Get(fmt.Sprintf("%s/", GetApiServerUrl())) + url := GetApiServerUrl() + + response, err := http.Get(fmt.Sprintf("%s/", url)) if err == nil && response.StatusCode == 200 { logger.Log.Infof("Found a running service %s and open port %d", mizu.ApiServerPodName, config.Config.View.GuiPort) return @@ -48,7 +51,8 @@ func runMizuView() { return } - logger.Log.Infof("Mizu is available at %s\n", GetApiServerUrl()) + logger.Log.Infof("Mizu is available at %s\n", url) + openBrowser(url) if isCompatible, err := version.CheckVersionCompatibility(); err != nil { logger.Log.Errorf("Failed to check versions compatibility %v", err) cancel() diff --git a/cli/config/configStructs/tapConfig.go b/cli/config/configStructs/tapConfig.go index d80163b27..8a999c37b 100644 --- a/cli/config/configStructs/tapConfig.go +++ b/cli/config/configStructs/tapConfig.go @@ -53,15 +53,6 @@ func (config *TapConfig) PodRegex() *regexp.Regexp { return podRegex } -func (config *TapConfig) TapOutgoing() bool { - directionLowerCase := strings.ToLower(config.Direction) - if directionLowerCase == "any" { - return true - } - - return false -} - func (config *TapConfig) MaxEntriesDBSizeBytes() int64 { maxEntriesDBSizeBytes, _ := units.HumanReadableToBytes(config.HumanMaxEntriesDBSize) return maxEntriesDBSizeBytes diff --git a/cli/kubernetes/provider.go b/cli/kubernetes/provider.go index 486bf1377..7c76290b9 100644 --- a/cli/kubernetes/provider.go +++ b/cli/kubernetes/provider.go @@ -7,15 +7,17 @@ import ( "encoding/json" "errors" "fmt" - "github.com/up9inc/mizu/cli/config/configStructs" - "github.com/up9inc/mizu/cli/logger" "path/filepath" "regexp" "strconv" + "github.com/up9inc/mizu/cli/config/configStructs" + "github.com/up9inc/mizu/cli/logger" + + "io" + "github.com/up9inc/mizu/cli/mizu" "github.com/up9inc/mizu/shared" - "io" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -55,21 +57,21 @@ func NewProvider(kubeConfigPath string) (*Provider, error) { restClientConfig, err := kubernetesConfig.ClientConfig() if err != nil { if clientcmd.IsEmptyConfig(err) { - return nil, fmt.Errorf("couldn't find the kube config file, or file is empty (%s)\n" + + return nil, fmt.Errorf("couldn't find the kube config file, or file is empty (%s)\n"+ "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err) } if clientcmd.IsConfigurationInvalid(err) { - return nil, fmt.Errorf("invalid kube config file (%s)\n" + + return nil, fmt.Errorf("invalid kube config file (%s)\n"+ "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err) } - return nil, fmt.Errorf("error while using kube config (%s)\n" + + return nil, fmt.Errorf("error while using kube config (%s)\n"+ "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err) } clientSet, err := getClientSet(restClientConfig) if err != nil { - return nil, fmt.Errorf("error while using kube config (%s)\n" + + return nil, 
fmt.Errorf("error while using kube config (%s)\n"+ "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err) } @@ -573,11 +575,11 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string, return nil } -func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodIPMap map[string][]string, serviceAccountName string, tapOutgoing bool, resources configStructs.Resources, imagePullPolicy core.PullPolicy) error { - logger.Log.Debugf("Applying %d tapper deamonsets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeToTappedPodIPMap), namespace, daemonSetName, podImage, tapperPodName) +func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodIPMap map[string][]string, serviceAccountName string, resources configStructs.Resources, imagePullPolicy core.PullPolicy) error { + logger.Log.Debugf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeToTappedPodIPMap), namespace, daemonSetName, podImage, tapperPodName) if len(nodeToTappedPodIPMap) == 0 { - return fmt.Errorf("Daemon set %s must tap at least 1 pod", daemonSetName) + return fmt.Errorf("daemon set %s must tap at least 1 pod", daemonSetName) } nodeToTappedPodIPMapJsonStr, err := json.Marshal(nodeToTappedPodIPMap) @@ -590,9 +592,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac "-i", "any", "--tap", "--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp), - } - if tapOutgoing { - mizuCmd = append(mizuCmd, "--anydirection") + "--nodefrag", } agentContainer := applyconfcore.Container() diff --git a/shared/models.go b/shared/models.go index ade4a9269..9802e3fde 100644 --- a/shared/models.go +++ b/shared/models.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "strings" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) type WebSocketMessageType string diff --git a/tap/api/api.go b/tap/api/api.go new file mode 100644 index 000000000..65674628f --- /dev/null +++ b/tap/api/api.go @@ -0,0 +1,165 @@ +package api + +import ( + "bufio" + "plugin" + "sync" + "time" +) + +type Protocol struct { + Name string `json:"name"` + LongName string `json:"long_name"` + Abbreviation string `json:"abbreviation"` + Version string `json:"version"` + BackgroundColor string `json:"background_color"` + ForegroundColor string `json:"foreground_color"` + FontSize int8 `json:"font_size"` + ReferenceLink string `json:"reference_link"` + Ports []string `json:"ports"` + Priority uint8 `json:"priority"` +} + +type Extension struct { + Protocol Protocol + Path string + Plug *plugin.Plugin + Dissector Dissector + MatcherMap *sync.Map +} + +type ConnectionInfo struct { + ClientIP string + ClientPort string + ServerIP string + ServerPort string + IsOutgoing bool +} + +type TcpID struct { + SrcIP string + DstIP string + SrcPort string + DstPort string + Ident string +} + +type CounterPair struct { + Request uint + Response uint +} + +type GenericMessage struct { + IsRequest bool `json:"is_request"` + CaptureTime time.Time `json:"capture_time"` + Payload interface{} `json:"payload"` +} + +type RequestResponsePair struct { + Request GenericMessage `json:"request"` + Response GenericMessage `json:"response"` +} + +// 
`Protocol` is modified in the later stages of data propagation. Therefore it's not a pointer. +type OutputChannelItem struct { + Protocol Protocol + Timestamp int64 + ConnectionInfo *ConnectionInfo + Pair *RequestResponsePair +} + +type Dissector interface { + Register(*Extension) + Ping() + Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, counterPair *CounterPair, emitter Emitter) error + Analyze(item *OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *MizuEntry + Summarize(entry *MizuEntry) *BaseEntryDetails + Represent(entry *MizuEntry) (Protocol, []byte, error) +} + +type Emitting struct { + OutputChannel chan *OutputChannelItem +} + +type Emitter interface { + Emit(item *OutputChannelItem) +} + +func (e *Emitting) Emit(item *OutputChannelItem) { + e.OutputChannel <- item +} + +type MizuEntry struct { + ID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + ProtocolName string `json:"protocol_key" gorm:"column:protocolKey"` + ProtocolVersion string `json:"protocol_version" gorm:"column:protocolVersion"` + Entry string `json:"entry,omitempty" gorm:"column:entry"` + EntryId string `json:"entryId" gorm:"column:entryId"` + Url string `json:"url" gorm:"column:url"` + Method string `json:"method" gorm:"column:method"` + Status int `json:"status" gorm:"column:status"` + RequestSenderIp string `json:"requestSenderIp" gorm:"column:requestSenderIp"` + Service string `json:"service" gorm:"column:service"` + Timestamp int64 `json:"timestamp" gorm:"column:timestamp"` + Path string `json:"path" gorm:"column:path"` + ResolvedSource string `json:"resolvedSource,omitempty" gorm:"column:resolvedSource"` + ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"` + SourceIp string `json:"sourceIp,omitempty" gorm:"column:sourceIp"` + DestinationIp string `json:"destinationIp,omitempty" gorm:"column:destinationIp"` + SourcePort string `json:"sourcePort,omitempty" gorm:"column:sourcePort"` + DestinationPort string `json:"destinationPort,omitempty" gorm:"column:destinationPort"` + IsOutgoing bool `json:"isOutgoing,omitempty" gorm:"column:isOutgoing"` + EstimatedSizeBytes int `json:"-" gorm:"column:estimatedSizeBytes"` +} + +type MizuEntryWrapper struct { + Protocol Protocol `json:"protocol"` + Representation string `json:"representation"` + Data MizuEntry `json:"data"` +} + +type BaseEntryDetails struct { + Id string `json:"id,omitempty"` + Protocol Protocol `json:"protocol,omitempty"` + Url string `json:"url,omitempty"` + RequestSenderIp string `json:"request_sender_ip,omitempty"` + Service string `json:"service,omitempty"` + Summary string `json:"summary,omitempty"` + StatusCode int `json:"status_code"` + Method string `json:"method,omitempty"` + Timestamp int64 `json:"timestamp,omitempty"` + SourceIp string `json:"source_ip,omitempty"` + DestinationIp string `json:"destination_ip,omitempty"` + SourcePort string `json:"source_port,omitempty"` + DestinationPort string `json:"destination_port,omitempty"` + IsOutgoing bool `json:"isOutgoing,omitempty"` + Latency int64 `json:"latency,omitempty"` + Rules ApplicableRules `json:"rules,omitempty"` +} + +type ApplicableRules struct { + Latency int64 `json:"latency,omitempty"` + Status bool `json:"status,omitempty"` + NumberOfRules int `json:"numberOfRules,omitempty"` +} + +type DataUnmarshaler interface { + UnmarshalData(*MizuEntry) error +} + +func (bed *BaseEntryDetails) UnmarshalData(entry *MizuEntry) error { + entryUrl := entry.Url + service := entry.Service + 
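The Dissector interface above is the entire contract an extension has to satisfy. A minimal skeleton, under the assumption (per the loader introduced in this change) that each extension is built as a Go plugin exporting a Dissector symbol; the method bodies are placeholders, not a real protocol:

    package main

    import (
        "bufio"
        "errors"

        "github.com/up9inc/mizu/tap/api"
    )

    var protocol = api.Protocol{Name: "noop", Ports: []string{"0"}, Priority: 255}

    type dissecting string

    func (d dissecting) Register(extension *api.Extension) { extension.Protocol = protocol }

    func (d dissecting) Ping() {}

    func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error {
        // Returning an error signals that this dissector did not recognize the
        // stream, so the tapper can try the next extension by priority.
        return errors.New("noop: not my protocol")
    }

    func (d dissecting) Analyze(item *api.OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *api.MizuEntry {
        return &api.MizuEntry{ProtocolName: protocol.Name, EntryId: entryId, Timestamp: item.Timestamp}
    }

    func (d dissecting) Summarize(entry *api.MizuEntry) *api.BaseEntryDetails {
        return &api.BaseEntryDetails{Id: entry.EntryId, Protocol: protocol}
    }

    func (d dissecting) Represent(entry *api.MizuEntry) (api.Protocol, []byte, error) {
        return protocol, []byte(`{}`), nil
    }

    // The exported symbol the agent looks up after plugin.Open.
    var Dissector dissecting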
bed.Id = entry.EntryId + bed.Url = entryUrl + bed.Service = service + bed.Summary = entry.Path + bed.StatusCode = entry.Status + bed.Method = entry.Method + bed.Timestamp = entry.Timestamp + bed.RequestSenderIp = entry.RequestSenderIp + bed.IsOutgoing = entry.IsOutgoing + return nil +} diff --git a/tap/api/go.mod b/tap/api/go.mod new file mode 100644 index 000000000..d5379a1fd --- /dev/null +++ b/tap/api/go.mod @@ -0,0 +1,3 @@ +module github.com/up9inc/mizu/tap/api + +go 1.16 diff --git a/tap/cleaner.go b/tap/cleaner.go index 96972fc9e..10a52968d 100644 --- a/tap/cleaner.go +++ b/tap/cleaner.go @@ -1,11 +1,12 @@ package tap import ( - "github.com/romana/rlog" "sync" "time" "github.com/google/gopacket/reassembly" + "github.com/romana/rlog" + "github.com/up9inc/mizu/tap/api" ) type CleanerStats struct { @@ -17,7 +18,6 @@ type CleanerStats struct { type Cleaner struct { assembler *reassembly.Assembler assemblerMutex *sync.Mutex - matcher *requestResponseMatcher cleanPeriod time.Duration connectionTimeout time.Duration stats CleanerStats @@ -32,13 +32,15 @@ func (cl *Cleaner) clean() { flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout)) cl.assemblerMutex.Unlock() - deleted := cl.matcher.deleteOlderThan(startCleanTime.Add(-cl.connectionTimeout)) + for _, extension := range extensions { + deleted := deleteOlderThan(extension.MatcherMap, startCleanTime.Add(-cl.connectionTimeout)) + cl.stats.deleted += deleted + } cl.statsMutex.Lock() rlog.Debugf("Assembler Stats after cleaning %s", cl.assembler.Dump()) cl.stats.flushed += flushed cl.stats.closed += closed - cl.stats.deleted += deleted cl.statsMutex.Unlock() } @@ -70,3 +72,25 @@ func (cl *Cleaner) dumpStats() CleanerStats { return stats } + +func deleteOlderThan(matcherMap *sync.Map, t time.Time) int { + numDeleted := 0 + + if matcherMap == nil { + return numDeleted + } + + matcherMap.Range(func(key interface{}, value interface{}) bool { + message, _ := value.(*api.GenericMessage) + // TODO: Investigate the reason why `request` is `nil` in some rare occasion + if message != nil { + if message.CaptureTime.Before(t) { + matcherMap.Delete(key) + numDeleted++ + } + } + return true + }) + + return numDeleted +} diff --git a/tap/extensions/amqp/go.mod b/tap/extensions/amqp/go.mod new file mode 100644 index 000000000..716aebec8 --- /dev/null +++ b/tap/extensions/amqp/go.mod @@ -0,0 +1,9 @@ +module github.com/up9inc/mizu/tap/extensions/amqp + +go 1.16 + +require ( + github.com/up9inc/mizu/tap/api v0.0.0 +) + +replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api diff --git a/tap/extensions/amqp/helpers.go b/tap/extensions/amqp/helpers.go new file mode 100644 index 000000000..b4eab04a4 --- /dev/null +++ b/tap/extensions/amqp/helpers.go @@ -0,0 +1,664 @@ +package main + +import ( + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/up9inc/mizu/tap/api" +) + +var connectionMethodMap = map[int]string{ + 10: "connection start", + 11: "connection start-ok", + 20: "connection secure", + 21: "connection secure-ok", + 30: "connection tune", + 31: "connection tune-ok", + 40: "connection open", + 41: "connection open-ok", + 50: "connection close", + 51: "connection close-ok", + 60: "connection blocked", + 61: "connection unblocked", +} + +var channelMethodMap = map[int]string{ + 10: "channel open", + 11: "channel open-ok", + 20: "channel flow", + 21: "channel flow-ok", + 40: "channel close", + 41: "channel close-ok", +} + +var exchangeMethodMap = map[int]string{ + 10: "exchange declare", + 11: "exchange 
declare-ok", + 20: "exchange delete", + 21: "exchange delete-ok", + 30: "exchange bind", + 31: "exchange bind-ok", + 40: "exchange unbind", + 51: "exchange unbind-ok", +} + +var queueMethodMap = map[int]string{ + 10: "queue declare", + 11: "queue declare-ok", + 20: "queue bind", + 21: "queue bind-ok", + 50: "queue unbind", + 51: "queue unbind-ok", + 30: "queue purge", + 31: "queue purge-ok", + 40: "queue delete", + 41: "queue delete-ok", +} + +var basicMethodMap = map[int]string{ + 10: "basic qos", + 11: "basic qos-ok", + 20: "basic consume", + 21: "basic consume-ok", + 30: "basic cancel", + 31: "basic cancel-ok", + 40: "basic publish", + 50: "basic return", + 60: "basic deliver", + 70: "basic get", + 71: "basic get-ok", + 72: "basic get-empty", + 80: "basic ack", + 90: "basic reject", + 100: "basic recover-async", + 110: "basic recover", + 111: "basic recover-ok", + 120: "basic nack", +} + +var txMethodMap = map[int]string{ + 10: "tx select", + 11: "tx select-ok", + 20: "tx commit", + 21: "tx commit-ok", + 30: "tx rollback", + 31: "tx rollback-ok", +} + +type AMQPWrapper struct { + Method string `json:"method"` + Url string `json:"url"` + Details interface{} `json:"details"` +} + +func emitAMQP(event interface{}, _type string, method string, connectionInfo *api.ConnectionInfo, emitter api.Emitter) { + request := &api.GenericMessage{ + IsRequest: true, + CaptureTime: time.Now(), + Payload: AMQPPayload{ + Data: &AMQPWrapper{ + Method: method, + Url: "", + Details: event, + }, + }, + } + item := &api.OutputChannelItem{ + Protocol: protocol, + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + ConnectionInfo: connectionInfo, + Pair: &api.RequestResponsePair{ + Request: *request, + Response: api.GenericMessage{}, + }, + } + emitter.Emit(item) +} + +func representProperties(properties map[string]interface{}, rep []interface{}) ([]interface{}, string, string) { + contentType := "" + contentEncoding := "" + deliveryMode := "" + priority := "" + correlationId := "" + replyTo := "" + expiration := "" + messageId := "" + timestamp := "" + _type := "" + userId := "" + appId := "" + + if properties["ContentType"] != nil { + contentType = properties["ContentType"].(string) + } + if properties["ContentEncoding"] != nil { + contentEncoding = properties["ContentEncoding"].(string) + } + if properties["Delivery Mode"] != nil { + deliveryMode = fmt.Sprintf("%g", properties["DeliveryMode"].(float64)) + } + if properties["Priority"] != nil { + priority = fmt.Sprintf("%g", properties["Priority"].(float64)) + } + if properties["CorrelationId"] != nil { + correlationId = properties["CorrelationId"].(string) + } + if properties["ReplyTo"] != nil { + replyTo = properties["ReplyTo"].(string) + } + if properties["Expiration"] != nil { + expiration = properties["Expiration"].(string) + } + if properties["MessageId"] != nil { + messageId = properties["MessageId"].(string) + } + if properties["Timestamp"] != nil { + timestamp = properties["Timestamp"].(string) + } + if properties["Type"] != nil { + _type = properties["Type"].(string) + } + if properties["UserId"] != nil { + userId = properties["UserId"].(string) + } + if properties["AppId"] != nil { + appId = properties["AppId"].(string) + } + + props, _ := json.Marshal([]map[string]string{ + { + "name": "Content Type", + "value": contentType, + }, + { + "name": "Content Encoding", + "value": contentEncoding, + }, + { + "name": "Delivery Mode", + "value": deliveryMode, + }, + { + "name": "Priority", + "value": priority, + }, + { + "name": "Correlation ID", + 
"value": correlationId, + }, + { + "name": "Reply To", + "value": replyTo, + }, + { + "name": "Expiration", + "value": expiration, + }, + { + "name": "Message ID", + "value": messageId, + }, + { + "name": "Timestamp", + "value": timestamp, + }, + { + "name": "Type", + "value": _type, + }, + { + "name": "User ID", + "value": userId, + }, + { + "name": "App ID", + "value": appId, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Properties", + "data": string(props), + }) + + return rep, contentType, contentEncoding +} + +func representBasicPublish(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Exchange", + "value": event["Exchange"].(string), + }, + { + "name": "Routing Key", + "value": event["RoutingKey"].(string), + }, + { + "name": "Mandatory", + "value": strconv.FormatBool(event["Mandatory"].(bool)), + }, + { + "name": "Immediate", + "value": strconv.FormatBool(event["Immediate"].(bool)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + properties := event["Properties"].(map[string]interface{}) + rep, contentType, _ := representProperties(properties, rep) + + if properties["Headers"] != nil { + headers := make([]map[string]string, 0) + for name, value := range properties["Headers"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Headers", + "data": string(headersMarshaled), + }) + } + + if event["Body"] != nil { + rep = append(rep, map[string]string{ + "type": "body", + "title": "Body", + "encoding": "base64", + "mime_type": contentType, + "data": event["Body"].(string), + }) + } + + return rep +} + +func representBasicDeliver(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + consumerTag := "" + deliveryTag := "" + redelivered := "" + + if event["ConsumerTag"] != nil { + consumerTag = event["ConsumerTag"].(string) + } + if event["DeliveryTag"] != nil { + deliveryTag = fmt.Sprintf("%g", event["DeliveryTag"].(float64)) + } + if event["Redelivered"] != nil { + redelivered = strconv.FormatBool(event["Redelivered"].(bool)) + } + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Consumer Tag", + "value": consumerTag, + }, + { + "name": "Delivery Tag", + "value": deliveryTag, + }, + { + "name": "Redelivered", + "value": redelivered, + }, + { + "name": "Exchange", + "value": event["Exchange"].(string), + }, + { + "name": "Routing Key", + "value": event["RoutingKey"].(string), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + properties := event["Properties"].(map[string]interface{}) + rep, contentType, _ := representProperties(properties, rep) + + if properties["Headers"] != nil { + headers := make([]map[string]string, 0) + for name, value := range properties["Headers"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Headers", + "data": string(headersMarshaled), + }) + } + + if event["Body"] != nil { + rep = append(rep, map[string]string{ + "type": "body", + "title": "Body", + "encoding": 
"base64", + "mime_type": contentType, + "data": event["Body"].(string), + }) + } + + return rep +} + +func representQueueDeclare(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Queue", + "value": event["Queue"].(string), + }, + { + "name": "Passive", + "value": strconv.FormatBool(event["Passive"].(bool)), + }, + { + "name": "Durable", + "value": strconv.FormatBool(event["Durable"].(bool)), + }, + { + "name": "Exclusive", + "value": strconv.FormatBool(event["Exclusive"].(bool)), + }, + { + "name": "Auto Delete", + "value": strconv.FormatBool(event["AutoDelete"].(bool)), + }, + { + "name": "NoWait", + "value": strconv.FormatBool(event["NoWait"].(bool)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + if event["Arguments"] != nil { + headers := make([]map[string]string, 0) + for name, value := range event["Arguments"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Arguments", + "data": string(headersMarshaled), + }) + } + + return rep +} + +func representExchangeDeclare(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Exchange", + "value": event["Exchange"].(string), + }, + { + "name": "Type", + "value": event["Type"].(string), + }, + { + "name": "Passive", + "value": strconv.FormatBool(event["Passive"].(bool)), + }, + { + "name": "Durable", + "value": strconv.FormatBool(event["Durable"].(bool)), + }, + { + "name": "Auto Delete", + "value": strconv.FormatBool(event["AutoDelete"].(bool)), + }, + { + "name": "Internal", + "value": strconv.FormatBool(event["Internal"].(bool)), + }, + { + "name": "NoWait", + "value": strconv.FormatBool(event["NoWait"].(bool)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + if event["Arguments"] != nil { + headers := make([]map[string]string, 0) + for name, value := range event["Arguments"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Arguments", + "data": string(headersMarshaled), + }) + } + + return rep +} + +func representConnectionStart(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Version Major", + "value": fmt.Sprintf("%g", event["VersionMajor"].(float64)), + }, + { + "name": "Version Minor", + "value": fmt.Sprintf("%g", event["VersionMinor"].(float64)), + }, + { + "name": "Mechanisms", + "value": event["Mechanisms"].(string), + }, + { + "name": "Locales", + "value": event["Locales"].(string), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + if event["ServerProperties"] != nil { + headers := make([]map[string]string, 0) + for name, value := range event["ServerProperties"].(map[string]interface{}) { + var outcome string + switch value.(type) { + case string: + outcome = value.(string) + break + case map[string]interface{}: + x, _ := json.Marshal(value) + 
outcome = string(x) + break + default: + panic("Unknown data type for the server property!") + } + headers = append(headers, map[string]string{ + "name": name, + "value": outcome, + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Server Properties", + "data": string(headersMarshaled), + }) + } + + return rep +} + +func representConnectionClose(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Reply Code", + "value": fmt.Sprintf("%g", event["ReplyCode"].(float64)), + }, + { + "name": "Reply Text", + "value": event["ReplyText"].(string), + }, + { + "name": "Class ID", + "value": fmt.Sprintf("%g", event["ClassId"].(float64)), + }, + { + "name": "Method ID", + "value": fmt.Sprintf("%g", event["MethodId"].(float64)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + return rep +} + +func representQueueBind(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Queue", + "value": event["Queue"].(string), + }, + { + "name": "Exchange", + "value": event["Exchange"].(string), + }, + { + "name": "RoutingKey", + "value": event["RoutingKey"].(string), + }, + { + "name": "NoWait", + "value": strconv.FormatBool(event["NoWait"].(bool)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + if event["Arguments"] != nil { + headers := make([]map[string]string, 0) + for name, value := range event["Arguments"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Arguments", + "data": string(headersMarshaled), + }) + } + + return rep +} + +func representBasicConsume(event map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Queue", + "value": event["Queue"].(string), + }, + { + "name": "Consumer Tag", + "value": event["ConsumerTag"].(string), + }, + { + "name": "No Local", + "value": strconv.FormatBool(event["NoLocal"].(bool)), + }, + { + "name": "No Ack", + "value": strconv.FormatBool(event["NoAck"].(bool)), + }, + { + "name": "Exclusive", + "value": strconv.FormatBool(event["Exclusive"].(bool)), + }, + { + "name": "NoWait", + "value": strconv.FormatBool(event["NoWait"].(bool)), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + if event["Arguments"] != nil { + headers := make([]map[string]string, 0) + for name, value := range event["Arguments"].(map[string]interface{}) { + headers = append(headers, map[string]string{ + "name": name, + "value": value.(string), + }) + } + headersMarshaled, _ := json.Marshal(headers) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Arguments", + "data": string(headersMarshaled), + }) + } + + return rep +} diff --git a/tap/extensions/amqp/main.go b/tap/extensions/amqp/main.go new file mode 100644 index 000000000..19b5cd913 --- /dev/null +++ b/tap/extensions/amqp/main.go @@ -0,0 +1,341 @@ +package main + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "strconv" + + 
"github.com/up9inc/mizu/tap/api" +) + +var protocol api.Protocol = api.Protocol{ + Name: "amqp", + LongName: "Advanced Message Queuing Protocol 0-9-1", + Abbreviation: "AMQP", + Version: "0-9-1", + BackgroundColor: "#ff6600", + ForegroundColor: "#ffffff", + FontSize: 12, + ReferenceLink: "https://www.rabbitmq.com/amqp-0-9-1-reference.html", + Ports: []string{"5671", "5672"}, + Priority: 1, +} + +func init() { + log.Println("Initializing AMQP extension...") +} + +type dissecting string + +func (d dissecting) Register(extension *api.Extension) { + extension.Protocol = protocol +} + +func (d dissecting) Ping() { + log.Printf("pong %s\n", protocol.Name) +} + +const amqpRequest string = "amqp_request" + +func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error { + r := AmqpReader{b} + + var remaining int + var header *HeaderFrame + var body []byte + + connectionInfo := &api.ConnectionInfo{ + ClientIP: tcpID.SrcIP, + ClientPort: tcpID.SrcPort, + ServerIP: tcpID.DstIP, + ServerPort: tcpID.DstPort, + IsOutgoing: true, + } + + eventBasicPublish := &BasicPublish{ + Exchange: "", + RoutingKey: "", + Mandatory: false, + Immediate: false, + Body: nil, + Properties: Properties{}, + } + + eventBasicDeliver := &BasicDeliver{ + ConsumerTag: "", + DeliveryTag: 0, + Redelivered: false, + Exchange: "", + RoutingKey: "", + Properties: Properties{}, + Body: nil, + } + + var lastMethodFrameMessage Message + + for { + frame, err := r.ReadFrame() + if err == io.EOF { + // We must read until we see an EOF... very important! + return errors.New("AMQP EOF") + } else if err != nil { + // TODO: Causes ignoring some methods. Return only in case of a certain error. But what? + return err + } + + switch f := frame.(type) { + case *HeartbeatFrame: + // drop + + case *HeaderFrame: + // start content state + header = f + remaining = int(header.Size) + switch lastMethodFrameMessage.(type) { + case *BasicPublish: + eventBasicPublish.Properties = header.Properties + case *BasicDeliver: + eventBasicDeliver.Properties = header.Properties + default: + } + + case *BodyFrame: + // continue until terminated + body = append(body, f.Body...) 
+ remaining -= len(f.Body) + switch lastMethodFrameMessage.(type) { + case *BasicPublish: + eventBasicPublish.Body = f.Body + emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, emitter) + case *BasicDeliver: + eventBasicDeliver.Body = f.Body + emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, emitter) + default: + } + + case *MethodFrame: + lastMethodFrameMessage = f.Method + switch m := f.Method.(type) { + case *BasicPublish: + eventBasicPublish.Exchange = m.Exchange + eventBasicPublish.RoutingKey = m.RoutingKey + eventBasicPublish.Mandatory = m.Mandatory + eventBasicPublish.Immediate = m.Immediate + + case *QueueBind: + eventQueueBind := &QueueBind{ + Queue: m.Queue, + Exchange: m.Exchange, + RoutingKey: m.RoutingKey, + NoWait: m.NoWait, + Arguments: m.Arguments, + } + emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, emitter) + + case *BasicConsume: + eventBasicConsume := &BasicConsume{ + Queue: m.Queue, + ConsumerTag: m.ConsumerTag, + NoLocal: m.NoLocal, + NoAck: m.NoAck, + Exclusive: m.Exclusive, + NoWait: m.NoWait, + Arguments: m.Arguments, + } + emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, emitter) + + case *BasicDeliver: + eventBasicDeliver.ConsumerTag = m.ConsumerTag + eventBasicDeliver.DeliveryTag = m.DeliveryTag + eventBasicDeliver.Redelivered = m.Redelivered + eventBasicDeliver.Exchange = m.Exchange + eventBasicDeliver.RoutingKey = m.RoutingKey + + case *QueueDeclare: + eventQueueDeclare := &QueueDeclare{ + Queue: m.Queue, + Passive: m.Passive, + Durable: m.Durable, + AutoDelete: m.AutoDelete, + Exclusive: m.Exclusive, + NoWait: m.NoWait, + Arguments: m.Arguments, + } + emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, emitter) + + case *ExchangeDeclare: + eventExchangeDeclare := &ExchangeDeclare{ + Exchange: m.Exchange, + Type: m.Type, + Passive: m.Passive, + Durable: m.Durable, + AutoDelete: m.AutoDelete, + Internal: m.Internal, + NoWait: m.NoWait, + Arguments: m.Arguments, + } + emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, emitter) + + case *ConnectionStart: + eventConnectionStart := &ConnectionStart{ + VersionMajor: m.VersionMajor, + VersionMinor: m.VersionMinor, + ServerProperties: m.ServerProperties, + Mechanisms: m.Mechanisms, + Locales: m.Locales, + } + emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, emitter) + + case *ConnectionClose: + eventConnectionClose := &ConnectionClose{ + ReplyCode: m.ReplyCode, + ReplyText: m.ReplyText, + ClassId: m.ClassId, + MethodId: m.MethodId, + } + emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, emitter) + + default: + + } + + default: + // log.Printf("unexpected frame: %+v\n", f) + } + } +} + +func (d dissecting) Analyze(item *api.OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *api.MizuEntry { + request := item.Pair.Request.Payload.(map[string]interface{}) + reqDetails := request["details"].(map[string]interface{}) + service := "amqp" + if resolvedDestination != "" { + service = resolvedDestination + } else if resolvedSource != "" { + service = resolvedSource + } + + summary := "" + switch request["method"] { + case basicMethodMap[40]: + summary = reqDetails["Exchange"].(string) + break + case basicMethodMap[60]: + summary = reqDetails["Exchange"].(string) + break + case exchangeMethodMap[10]: + summary = reqDetails["Exchange"].(string) + break + 
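// Worked example for this switch, assuming a BasicPublish to exchange
// "logs" on a destination resolved to "rabbitmq":
//
//   summary = "logs"            (reqDetails["Exchange"])
//   Method  = "basic publish"   (basicMethodMap[40])
//   Path    = "logs"
//   Url     = "rabbitmqlogs"    (service and summary concatenated below)
//
// The break statements are redundant in Go switches (cases do not fall
// through by default), but they are harmless.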
case queueMethodMap[10]: + summary = reqDetails["Queue"].(string) + break + case connectionMethodMap[10]: + summary = fmt.Sprintf( + "%s.%s", + strconv.Itoa(int(reqDetails["VersionMajor"].(float64))), + strconv.Itoa(int(reqDetails["VersionMinor"].(float64))), + ) + break + case connectionMethodMap[50]: + summary = reqDetails["ReplyText"].(string) + break + case queueMethodMap[20]: + summary = reqDetails["Queue"].(string) + break + case basicMethodMap[20]: + summary = reqDetails["Queue"].(string) + break + } + + request["url"] = summary + entryBytes, _ := json.Marshal(item.Pair) + return &api.MizuEntry{ + ProtocolName: protocol.Name, + ProtocolVersion: protocol.Version, + EntryId: entryId, + Entry: string(entryBytes), + Url: fmt.Sprintf("%s%s", service, summary), + Method: request["method"].(string), + Status: 0, + RequestSenderIp: item.ConnectionInfo.ClientIP, + Service: service, + Timestamp: item.Timestamp, + Path: summary, + ResolvedSource: resolvedSource, + ResolvedDestination: resolvedDestination, + SourceIp: item.ConnectionInfo.ClientIP, + DestinationIp: item.ConnectionInfo.ServerIP, + SourcePort: item.ConnectionInfo.ClientPort, + DestinationPort: item.ConnectionInfo.ServerPort, + IsOutgoing: item.ConnectionInfo.IsOutgoing, + } + +} + +func (d dissecting) Summarize(entry *api.MizuEntry) *api.BaseEntryDetails { + return &api.BaseEntryDetails{ + Id: entry.EntryId, + Protocol: protocol, + Url: entry.Url, + RequestSenderIp: entry.RequestSenderIp, + Service: entry.Service, + Summary: entry.Path, + StatusCode: entry.Status, + Method: entry.Method, + Timestamp: entry.Timestamp, + SourceIp: entry.SourceIp, + DestinationIp: entry.DestinationIp, + SourcePort: entry.SourcePort, + DestinationPort: entry.DestinationPort, + IsOutgoing: entry.IsOutgoing, + Latency: 0, + Rules: api.ApplicableRules{ + Latency: 0, + Status: false, + }, + } +} + +func (d dissecting) Represent(entry *api.MizuEntry) (api.Protocol, []byte, error) { + var root map[string]interface{} + json.Unmarshal([]byte(entry.Entry), &root) + representation := make(map[string]interface{}, 0) + request := root["request"].(map[string]interface{})["payload"].(map[string]interface{}) + var repRequest []interface{} + details := request["details"].(map[string]interface{}) + switch request["method"].(string) { + case basicMethodMap[40]: + repRequest = representBasicPublish(details) + break + case basicMethodMap[60]: + repRequest = representBasicDeliver(details) + break + case queueMethodMap[10]: + repRequest = representQueueDeclare(details) + break + case exchangeMethodMap[10]: + repRequest = representExchangeDeclare(details) + break + case connectionMethodMap[10]: + repRequest = representConnectionStart(details) + break + case connectionMethodMap[50]: + repRequest = representConnectionClose(details) + break + case queueMethodMap[20]: + repRequest = representQueueBind(details) + break + case basicMethodMap[20]: + repRequest = representBasicConsume(details) + break + } + representation["request"] = repRequest + object, err := json.Marshal(representation) + return protocol, object, err +} + +var Dissector dissecting diff --git a/tap/extensions/amqp/read.go b/tap/extensions/amqp/read.go new file mode 100644 index 000000000..e31bb7198 --- /dev/null +++ b/tap/extensions/amqp/read.go @@ -0,0 +1,460 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
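Stepping back to Represent above: it round-trips the pair that Analyze stored in entry.Entry. A sketch of that access pattern, assuming a stored value shaped like the marshaled request/response pair:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Hypothetical stored value; the real one is json.Marshal(item.Pair).
        stored := `{"request":{"payload":{"method":"basic publish","details":{"Exchange":"logs"}}}}`

        var root map[string]interface{}
        if err := json.Unmarshal([]byte(stored), &root); err != nil {
            panic(err)
        }
        request := root["request"].(map[string]interface{})["payload"].(map[string]interface{})
        details := request["details"].(map[string]interface{})
        fmt.Println(request["method"], details["Exchange"]) // basic publish logs
    }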
+// Source code and contact info at http://github.com/streadway/amqp + +package main + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + MethodFrame + PropertiesFrame + BodyFrame + HeartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. +*/ +func (r *AmqpReader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(r.R, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + if size > 1000000 { + return nil, ErrMaxSize + } + + switch typ { + case frameMethod: + if frame, err = r.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = r.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = r.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = r.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(r.R, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + // slices can't be longer than max int32 value + if length > (^uint32(0) >> 1) { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': 
byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + + case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. 
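For example, under these rules a table holding the single pair
{"x-ha-policy": true} is packed as the following octets (hex):

    00 00 00 0E                        table payload length (14), as a long string
    0B                                 key length (11), as a short string
    78 2D 68 61 2D 70 6F 6C 69 63 79   "x-ha-policy"
    74                                 field type 't' (boolean)
    01                                 value: true

readTable below reads the long string, then repeatedly consumes a short-string
key and a typed field until the buffer is empty.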
+*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var ( + size uint32 + err error + ) + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + var ( + lim = &io.LimitedReader{R: r, N: int64(size)} + arr = []interface{}{} + field interface{} + ) + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (r *AmqpReader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &HeaderFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.R, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(r.R, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(r.R, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(r.R, binary.BigEndian, &flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if hf.Properties.ContentType, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(r.R); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(r.R, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(r.R, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(r.R); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(r.R); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(r.R); err != nil { + return + } + } + + return hf, nil +} + +func (r *AmqpReader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &BodyFrame{ + ChannelId: channel, + Body: make([]byte, size), + } + + if 
_, err = io.ReadFull(r.R, bf.Body); err != nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (r *AmqpReader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &HeartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/tap/extensions/amqp/spec091.go b/tap/extensions/amqp/spec091.go new file mode 100644 index 000000000..ea8448da4 --- /dev/null +++ b/tap/extensions/amqp/spec091.go @@ -0,0 +1,3307 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package main + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. +const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 + MaxSizeError = 551 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type ConnectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (msg *ConnectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (msg *ConnectionStart) wait() bool { + return true +} + +func (msg *ConnectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.VersionMinor); err != nil { + return + } + + if err = writeTable(w, msg.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, msg.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, msg.Locales); err != nil { + return + } + + return +} + +func (msg *ConnectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.VersionMinor); err != nil { + return + } + + if msg.ServerProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanisms, err = readLongstr(r); err != nil { + return + } + if msg.Locales, err = readLongstr(r); err != nil { + return + } + + return +} + +type ConnectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (msg *ConnectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (msg *ConnectionStartOk) wait() bool { + return true +} + +func (msg *ConnectionStartOk) 
write(w io.Writer) (err error) { + + if err = writeTable(w, msg.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, msg.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + if err = writeShortstr(w, msg.Locale); err != nil { + return + } + + return +} + +func (msg *ConnectionStartOk) read(r io.Reader) (err error) { + + if msg.ClientProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanism, err = readShortstr(r); err != nil { + return + } + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + if msg.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (msg *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (msg *connectionSecure) wait() bool { + return true +} + +func (msg *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Challenge); err != nil { + return + } + + return +} + +func (msg *connectionSecure) read(r io.Reader) (err error) { + + if msg.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (msg *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (msg *connectionSecureOk) wait() bool { + return true +} + +func (msg *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + return +} + +func (msg *connectionSecureOk) read(r io.Reader) (err error) { + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (msg *connectionTune) wait() bool { + return true +} + +func (msg *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (msg *connectionTuneOk) wait() bool { + return true +} + +func (msg *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { 
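+ // Editor's note: VirtualHost ("/" by default in RabbitMQ) names the vhost that connection.open selects; the two reserved fields below are unused in AMQP 0-9-1.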
+ VirtualHost string + reserved1 string + reserved2 bool +} + +func (msg *connectionOpen) id() (uint16, uint16) { + return 10, 40 +} + +func (msg *connectionOpen) wait() bool { + return true +} + +func (msg *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + if msg.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if msg.VirtualHost, err = readShortstr(r); err != nil { + return + } + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (msg *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (msg *connectionOpenOk) wait() bool { + return true +} + +func (msg *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *connectionOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type ConnectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *ConnectionClose) id() (uint16, uint16) { + return 10, 50 +} + +func (msg *ConnectionClose) wait() bool { + return true +} + +func (msg *ConnectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *ConnectionClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type ConnectionCloseOk struct { +} + +func (msg *ConnectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (msg *ConnectionCloseOk) wait() bool { + return true +} + +func (msg *ConnectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *ConnectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (msg *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (msg *connectionBlocked) wait() bool { + return false +} + +func (msg *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Reason); err != nil { + return + } + + return +} + +func (msg *connectionBlocked) read(r io.Reader) (err error) { + + if msg.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (msg *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (msg *connectionUnblocked) wait() bool { + return false +} + +func (msg *connectionUnblocked) write(w io.Writer) (err error) { + + 
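// connection.unblocked carries no arguments, so there is nothing to encode or decode for this method. + 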
return +} + +func (msg *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (msg *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (msg *channelOpen) wait() bool { + return true +} + +func (msg *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpen) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type channelOpenOk struct { + reserved1 string +} + +func (msg *channelOpenOk) id() (uint16, uint16) { + return 20, 11 +} + +func (msg *channelOpenOk) wait() bool { + return true +} + +func (msg *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (msg *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (msg *channelFlow) wait() bool { + return true +} + +func (msg *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (msg *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (msg *channelFlowOk) wait() bool { + return false +} + +func (msg *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (msg *channelClose) wait() bool { + return true +} + +func (msg *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (msg *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (msg *channelCloseOk) wait() bool { + return true +} + +func (msg *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *channelCloseOk) 
read(r io.Reader) (err error) { + + return +} + +type ExchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (msg *ExchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (msg *ExchangeDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *ExchangeDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.Type); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.AutoDelete { + bits |= 1 << 2 + } + + if msg.Internal { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *ExchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.AutoDelete = (bits&(1<<2) > 0) + msg.Internal = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type ExchangeDeclareOk struct { +} + +func (msg *ExchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (msg *ExchangeDeclareOk) wait() bool { + return true +} + +func (msg *ExchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (msg *ExchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (msg *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (msg *exchangeDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (msg *exchangeDeleteOk) id() (uint16, uint16) { + return 40, 21 +} + +func (msg *exchangeDeleteOk) wait() bool { + return true +} + +func (msg *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool 
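+ // Arguments carries optional binding arguments; the headers exchange type, for example, matches on these rather than on the routing key.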
+ Arguments Table +} + +func (msg *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (msg *exchangeBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (msg *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (msg *exchangeBindOk) wait() bool { + return true +} + +func (msg *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (msg *exchangeUnbind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (msg *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (msg *exchangeUnbindOk) wait() bool { + return true +} + +func (msg *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type QueueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + 
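// When NoWait is set the server sends no QueueDeclareOk reply, which is why wait() below returns false in that case. + 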
NoWait bool + Arguments Table +} + +func (msg *QueueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (msg *QueueDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *QueueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.AutoDelete { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *QueueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.AutoDelete = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type QueueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (msg *QueueDeclareOk) id() (uint16, uint16) { + return 50, 11 +} + +func (msg *QueueDeclareOk) wait() bool { + return true +} + +func (msg *QueueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.ConsumerCount); err != nil { + return + } + + return +} + +func (msg *QueueDeclareOk) read(r io.Reader) (err error) { + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.ConsumerCount); err != nil { + return + } + + return +} + +type QueueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *QueueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (msg *QueueBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *QueueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *QueueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = 
(bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type QueueBindOk struct { +} + +func (msg *QueueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (msg *QueueBindOk) wait() bool { + return true +} + +func (msg *QueueBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *QueueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (msg *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (msg *queueUnbind) wait() bool { + return true +} + +func (msg *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueUnbindOk struct { +} + +func (msg *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (msg *queueUnbindOk) wait() bool { + return true +} + +func (msg *queueUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (msg *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (msg *queuePurge) wait() bool { + return true && !msg.NoWait +} + +func (msg *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (msg *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (msg *queuePurgeOk) wait() bool { + return true +} + +func (msg *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (msg *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (msg *queueDelete) wait() bool { + return true && 
!msg.NoWait +} + +func (msg *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.IfEmpty { + bits |= 1 << 1 + } + + if msg.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.IfEmpty = (bits&(1<<1) > 0) + msg.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (msg *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (msg *queueDeleteOk) wait() bool { + return true +} + +func (msg *queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (msg *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func (msg *basicQos) wait() bool { + return true +} + +func (msg *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchCount); err != nil { + return + } + + if msg.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchSize); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (msg *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (msg *basicQosOk) wait() bool { + return true +} + +func (msg *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type BasicConsume struct { + reserved1 uint16 + Queue string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (msg *BasicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (msg *BasicConsume) wait() bool { + return true && !msg.NoWait +} + +func (msg *BasicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoLocal { + bits |= 1 << 0 + } + + if msg.NoAck { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil 
{ + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *BasicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoLocal = (bits&(1<<0) > 0) + msg.NoAck = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.NoWait = (bits&(1<<3) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type BasicConsumeOk struct { + ConsumerTag string +} + +func (msg *BasicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (msg *BasicConsumeOk) wait() bool { + return true +} + +func (msg *BasicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *BasicConsumeOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel struct { + ConsumerTag string + NoWait bool +} + +func (msg *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (msg *basicCancel) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (msg *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (msg *basicCancelOk) wait() bool { + return true +} + +func (msg *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicCancelOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type BasicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties Properties + Body []byte +} + +func (msg *BasicPublish) id() (uint16, uint16) { + return 60, 40 +} + +func (msg *BasicPublish) wait() bool { + return false +} + +func (msg *BasicPublish) getContent() (Properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *BasicPublish) setContent(props Properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *BasicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.Mandatory { + bits |= 1 << 0 + } + + if msg.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *BasicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, 
binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Mandatory = (bits&(1<<0) > 0) + msg.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties Properties + Body []byte +} + +func (msg *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (msg *basicReturn) wait() bool { + return false +} + +func (msg *basicReturn) getContent() (Properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicReturn) setContent(props Properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type BasicDeliver struct { + ConsumerTag string + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties Properties + Body []byte +} + +func (msg *BasicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (msg *BasicDeliver) wait() bool { + return false +} + +func (msg *BasicDeliver) getContent() (Properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *BasicDeliver) setContent(props Properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *BasicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *BasicDeliver) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (msg *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (msg *basicGet) wait() bool { + return true +} + +func (msg *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, 
msg.Queue); err != nil { + return + } + + if msg.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties Properties + Body []byte +} + +func (msg *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (msg *basicGetOk) wait() bool { + return true +} + +func (msg *basicGetOk) getContent() (Properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicGetOk) setContent(props Properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicGetEmpty struct { + reserved1 string +} + +func (msg *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (msg *basicGetEmpty) wait() bool { + return true +} + +func (msg *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *basicGetEmpty) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (msg *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (msg *basicAck) wait() bool { + return false +} + +func (msg *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (msg *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (msg *basicReject) wait() bool { + return 
false +} + +func (msg *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (msg *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (msg *basicRecoverAsync) wait() bool { + return false +} + +func (msg *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (msg *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (msg *basicRecover) wait() bool { + return true +} + +func (msg *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (msg *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (msg *basicRecoverOk) wait() bool { + return true +} + +func (msg *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (msg *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (msg *basicNack) wait() bool { + return false +} + +func (msg *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if msg.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + msg.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { +} + +func (msg *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (msg *txSelect) wait() bool { + return true +} + +func (msg *txSelect) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (msg *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (msg *txSelectOk) wait() bool { + return true +} + +func (msg *txSelectOk) write(w io.Writer) (err error) { + + return +} + 
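// Editor's illustration (not part of the generated spec): consecutive bool fields are packed into a single octet by these write/read pairs - a QueueDeclare with Durable and AutoDelete set writes bits = 1<<1|1<<3 = 0x0A, and read() recovers each flag by masking, e.g. Durable = bits&(1<<1) > 0. + 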
+func (msg *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (msg *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (msg *txCommit) wait() bool { + return true +} + +func (msg *txCommit) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (msg *txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (msg *txCommitOk) wait() bool { + return true +} + +func (msg *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (msg *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (msg *txRollback) wait() bool { + return true +} + +func (msg *txRollback) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct { +} + +func (msg *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (msg *txRollbackOk) wait() bool { + return true +} + +func (msg *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (msg *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (msg *confirmSelect) wait() bool { + return true +} + +func (msg *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if msg.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (msg *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (msg *confirmSelectOk) wait() bool { + return true +} + +func (msg *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (r *AmqpReader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &MethodFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.R, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(r.R, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &ConnectionStart{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &ConnectionStartOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = 
method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &ConnectionClose{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + method := &ConnectionCloseOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + //fmt.Println("NextMethod: class:40 method:10") + method := &ExchangeDeclare{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &ExchangeDeclareOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(r.R); err != nil { + return + } + 
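// Every case in this switch allocates the typed method, decodes its arguments with read(), and attaches it to the method frame below; the same pattern repeats for each class/method pair. + 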
mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &QueueDeclare{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &QueueDeclareOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &QueueBind{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &QueueBindOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &BasicConsume{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 
method:21") + method := &BasicConsumeOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &BasicPublish{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &BasicDeliver{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(r.R); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(r.R); err != nil { + return + } + 
mf.Method = method
+
+		case 30: // tx rollback
+			//fmt.Println("NextMethod: class:90 method:30")
+			method := &txRollback{}
+			if err = method.read(r.R); err != nil {
+				return
+			}
+			mf.Method = method
+
+		case 31: // tx rollback-ok
+			//fmt.Println("NextMethod: class:90 method:31")
+			method := &txRollbackOk{}
+			if err = method.read(r.R); err != nil {
+				return
+			}
+			mf.Method = method
+
+		default:
+			return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId)
+		}
+
+	case 85: // confirm
+		switch mf.MethodId {
+
+		case 10: // confirm select
+			//fmt.Println("NextMethod: class:85 method:10")
+			method := &confirmSelect{}
+			if err = method.read(r.R); err != nil {
+				return
+			}
+			mf.Method = method
+
+		case 11: // confirm select-ok
+			//fmt.Println("NextMethod: class:85 method:11")
+			method := &confirmSelectOk{}
+			if err = method.read(r.R); err != nil {
+				return
+			}
+			mf.Method = method
+
+		default:
+			return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId)
+		}
+
+	default:
+		return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId)
+	}
+
+	return mf, nil
+}
diff --git a/tap/extensions/amqp/structs.go b/tap/extensions/amqp/structs.go
new file mode 100644
index 000000000..4f7bb93df
--- /dev/null
+++ b/tap/extensions/amqp/structs.go
@@ -0,0 +1,17 @@
+package main
+
+import (
+	"encoding/json"
+)
+
+type AMQPPayload struct {
+	Data interface{}
+}
+
+type AMQPPayloader interface {
+	MarshalJSON() ([]byte, error)
+}
+
+func (h AMQPPayload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(h.Data)
+}
diff --git a/tap/extensions/amqp/types.go b/tap/extensions/amqp/types.go
new file mode 100644
index 000000000..78a1cf3b8
--- /dev/null
+++ b/tap/extensions/amqp/types.go
@@ -0,0 +1,431 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"time"
+)
+
+// Constants for standard AMQP 0-9-1 exchange types.
+const (
+	ExchangeDirect  = "direct"
+	ExchangeFanout  = "fanout"
+	ExchangeTopic   = "topic"
+	ExchangeHeaders = "headers"
+)
+
+var (
+	// ErrClosed is returned when the channel or connection is not open
+	ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"}
+
+	// ErrChannelMax is returned when Connection.Channel has been called enough
+	// times that all channel IDs have been exhausted in the client or the
+	// server.
+	ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"}
+
+	// ErrSASL is returned from Dial when the authentication mechanism could not
+	// be negotiated.
+	ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"}
+
+	// ErrCredentials is returned when the authenticated client is not authorized
+	// to any vhost.
+	ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"}
+
+	// ErrVhost is returned when the authenticated user is not permitted to
+	// access the requested Vhost.
+	ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"}
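+
+	// Editor's note: the codes matched by isSoftExceptionCode (311, 312, 313,
+	// 403, 404, 405 and 406) are channel-level, potentially recoverable
+	// exceptions; every other exception code is a hard error that closes the
+	// whole connection.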
+ ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + + // ErrFrame is returned when the protocol frame cannot be read from the + // server, indicating an unsupported protocol or unsupported frame type. + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + + // ErrCommandInvalid is returned when the server sends an unexpected response + // to this requested message type. This indicates a bug in this client. + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + + // ErrUnexpectedFrame is returned when something other than a method or + // heartbeat frame is delivered to the Connection, indicating a bug in the + // client. + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + + // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP. + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} + + // ErrMaxSize is returned when an AMQP message is bigger than the 1MB maximum + ErrMaxSize = &Error{Code: MaxSizeError, Reason: "an AMQP message cannot be bigger than 1MB"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. +type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with different parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (e Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason) +} + +// Used by header frames to capture routing and header information +type Properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence.
The bits are ordered from most +// significant to least significant - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. +type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. + Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgment of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server successfully received the publishing +} + +// Decimal matches the AMQP decimal type. Scale is the number of decimal +// digits; e.g. Scale == 2, Value == 12345 gives Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. +// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type conversion. +// +// RabbitMQ expects int32 for integer values.
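+// +// For illustration (an illustrative sketch, not part of the original source), a +// table that passes Validate might look like: +// +//   headers := Table{"x-retry-count": int32(3), "trace": Table{"id": "abc"}} +//   if err := headers.Validate(); err != nil { +//       // a value of an unsupported Go type would be reported here +//   }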
+// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %T not supported", f) +} + +// Validate returns an error if any Go types in the table are incompatible with AMQP types. +func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (set tagSet) Len() int { return len(set) } +func (set tagSet) Less(i, j int) bool { return (set)[i] < (set)[j] } +func (set tagSet) Swap(i, j int) { (set)[i], (set)[j] = (set)[j], (set)[i] } +func (set *tagSet) Push(tag interface{}) { *set = append(*set, tag.(uint64)) } +func (set *tagSet) Pop() interface{} { + val := (*set)[len(*set)-1] + *set = (*set)[:len(*set)-1] + return val +} + +type Message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + Message + getContent() (Properties, []byte) + setContent(Properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. + +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type AmqpReader struct { + R io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. + 3. Check that the method is allowed in the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables).
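For example, a Basic.Publish command is carried as class-id 60, method-id 40; +the NextMethod switch shown earlier maps that pair onto the BasicPublish struct.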
The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. +*/ +type MethodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method Message +} + +func (f *MethodFrame) channel() uint16 { return f.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type HeartbeatFrame struct { + ChannelId uint16 +} + +func (f *HeartbeatFrame) channel() uint16 { return f.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type HeaderFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties Properties +} + +func (f *HeaderFrame) channel() uint16 { return f.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type BodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (f *BodyFrame) channel() uint16 { return f.ChannelId } diff --git a/tap/extensions/amqp/write.go b/tap/extensions/amqp/write.go new file mode 100644 index 000000000..89432fdb2 --- /dev/null +++ b/tap/extensions/amqp/write.go @@ -0,0 +1,416 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package main + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (w *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(w.w); err != nil { + return + } + + if buf, ok := w.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (f *MethodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if f.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := f.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = f.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (f *HeartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (f *HeaderFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. 
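+ // For example (illustration): a message with only ContentType and DeliveryMode + // set yields mask 0x8000|0x1000 == 0x9000; the second pass then writes the + // content-type short string followed by the delivery-mode octet.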
+ + var mask uint16 + + if len(f.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(f.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if f.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if f.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(f.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(f.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(f.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(f.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if f.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(f.Properties.Type) > 0 { + mask = mask | flagType + } + if len(f.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(f.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { + if err = writeShortstr(&payload, f.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil { + return + } + } + if hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, f.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = writeShortstr(&payload, f.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, f.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, f.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, f.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, f.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, f.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byte range from the full body whose size is declared in the +// Header frame +func (f *BodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, f.ChannelId, f.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + }
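+ + // The seven bytes written above form the fixed frame header: a type octet, a + // big-endian channel short and a big-endian payload size long; the payload and + // the closing frame-end octet follow below.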
+ + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) 
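+ // 'S' (long string): a 4-byte big-endian length followed by the raw bytes; as + // with the fixed-size cases above, enc is flushed by the single w.Write(enc) + // after the switch.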
+ + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/tap/extensions/http/go.mod b/tap/extensions/http/go.mod new file mode 100644 index 000000000..61b21db71 --- /dev/null +++ b/tap/extensions/http/go.mod @@ -0,0 +1,13 @@ +module github.com/up9inc/mizu/tap/extensions/http + +go 1.16 + +require ( + github.com/google/martian v2.1.0+incompatible + github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7 + github.com/up9inc/mizu/tap/api v0.0.0 + golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 + golang.org/x/text v0.3.5 // indirect +) + +replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api diff --git a/tap/extensions/http/go.sum b/tap/extensions/http/go.sum new file mode 100644 index 000000000..1ff511257 --- /dev/null +++ b/tap/extensions/http/go.sum @@ -0,0 +1,12 @@ +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7 h1:jkvpcEatpwuMF5O5LVxTnehj6YZ/aEZN4NWD/Xml4pI= +github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7/go.mod h1:KTrHyWpO1sevuXPZwyeZc72ddWRFqNSKDFl7uVWKpg0= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/tap/grpc_assembler.go b/tap/extensions/http/grpc_assembler.go similarity index 87% rename from tap/grpc_assembler.go rename to tap/extensions/http/grpc_assembler.go index 72b5665f1..0f7a8c00f 100644 --- a/tap/grpc_assembler.go +++ b/tap/extensions/http/grpc_assembler.go @@ -1,4 +1,4 @@ -package tap +package main import ( "bufio" @@ -17,17 +17,19 @@ import ( ) const frameHeaderLen = 9 + var clientPreface 
= []byte(http2.ClientPreface) + const initialHeaderTableSize = 4096 const protoHTTP2 = "HTTP/2.0" const protoMajorHTTP2 = 2 const protoMinorHTTP2 = 0 -var maxHTTP2DataLen int = maxHTTP2DataLenDefault // value initialized during init +var maxHTTP2DataLen = 1 * 1024 * 1024 // 1MB type messageFragment struct { headers []hpack.HeaderField - data []byte + data []byte } type fragmentsByStream map[uint32]*messageFragment @@ -46,7 +48,7 @@ func (fbs *fragmentsByStream) appendFrame(streamID uint32, frame http2.Frame) { if existingFragment, ok := (*fbs)[streamID]; ok { existingDataLen := len(existingFragment.data) // Never save more than maxHTTP2DataLen bytes - numBytesToAppend := int(math.Min(float64(maxHTTP2DataLen - existingDataLen), float64(newDataLen))) + numBytesToAppend := int(math.Min(float64(maxHTTP2DataLen-existingDataLen), float64(newDataLen))) existingFragment.data = append(existingFragment.data, frame.Data()[:numBytesToAppend]...) } else { @@ -69,19 +71,19 @@ func (fbs *fragmentsByStream) pop(streamID uint32) ([]hpack.HeaderField, []byte) return headers, data } -func createGrpcAssembler(b *bufio.Reader) GrpcAssembler { +func createGrpcAssembler(b *bufio.Reader) *GrpcAssembler { var framerOutput bytes.Buffer framer := http2.NewFramer(&framerOutput, b) framer.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - return GrpcAssembler{ + return &GrpcAssembler{ fragmentsByStream: make(fragmentsByStream), - framer: framer, + framer: framer, } } type GrpcAssembler struct { fragmentsByStream fragmentsByStream - framer *http2.Framer + framer *http2.Framer } func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) { @@ -118,26 +120,26 @@ func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) { var messageHTTP1 interface{} if _, ok := headersHTTP1[":method"]; ok { messageHTTP1 = http.Request{ - URL: &url.URL{}, - Method: "POST", - Header: headersHTTP1, - Proto: protoHTTP2, - ProtoMajor: protoMajorHTTP2, - ProtoMinor: protoMinorHTTP2, - Body: io.NopCloser(strings.NewReader(dataString)), + URL: &url.URL{}, + Method: "POST", + Header: headersHTTP1, + Proto: protoHTTP2, + ProtoMajor: protoMajorHTTP2, + ProtoMinor: protoMinorHTTP2, + Body: io.NopCloser(strings.NewReader(dataString)), ContentLength: int64(len(dataString)), } } else if _, ok := headersHTTP1[":status"]; ok { messageHTTP1 = http.Response{ - Header: headersHTTP1, - Proto: protoHTTP2, - ProtoMajor: protoMajorHTTP2, - ProtoMinor: protoMinorHTTP2, - Body: io.NopCloser(strings.NewReader(dataString)), + Header: headersHTTP1, + Proto: protoHTTP2, + ProtoMajor: protoMajorHTTP2, + ProtoMinor: protoMinorHTTP2, + Body: io.NopCloser(strings.NewReader(dataString)), ContentLength: int64(len(dataString)), } } else { - return 0, nil, errors.New("Failed to assemble stream: neither a request nor a message") + return 0, nil, errors.New("failed to assemble stream: neither a request nor a message") } return streamID, messageHTTP1, nil @@ -225,7 +227,7 @@ func checkClientPreface(b *bufio.Reader) (bool, error) { func discardClientPreface(b *bufio.Reader) error { if isClientPrefacePresent, err := checkClientPreface(b); err != nil { return err - } else if !isClientPrefacePresent{ + } else if !isClientPrefacePresent { return errors.New("discardClientPreface: does not begin with client preface") } diff --git a/tap/extensions/http/handlers.go b/tap/extensions/http/handlers.go new file mode 100644 index 000000000..ef615f8f6 --- /dev/null +++ b/tap/extensions/http/handlers.go @@ -0,0 +1,164 @@ +package main + +import ( + "bufio" + 
"bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/romana/rlog" + + "github.com/up9inc/mizu/tap/api" +) + +func handleHTTP2Stream(grpcAssembler *GrpcAssembler, tcpID *api.TcpID, emitter api.Emitter) error { + streamID, messageHTTP1, err := grpcAssembler.readMessage() + if err != nil { + return err + } + + var item *api.OutputChannelItem + + switch messageHTTP1 := messageHTTP1.(type) { + case http.Request: + ident := fmt.Sprintf( + "%s->%s %s->%s %d", + tcpID.SrcIP, + tcpID.DstIP, + tcpID.SrcPort, + tcpID.DstPort, + streamID, + ) + item = reqResMatcher.registerRequest(ident, &messageHTTP1, time.Now()) + if item != nil { + item.ConnectionInfo = &api.ConnectionInfo{ + ClientIP: tcpID.SrcIP, + ClientPort: tcpID.SrcPort, + ServerIP: tcpID.DstIP, + ServerPort: tcpID.DstPort, + IsOutgoing: true, + } + } + case http.Response: + ident := fmt.Sprintf( + "%s->%s %s->%s %d", + tcpID.DstIP, + tcpID.SrcIP, + tcpID.DstPort, + tcpID.SrcPort, + streamID, + ) + item = reqResMatcher.registerResponse(ident, &messageHTTP1, time.Now()) + if item != nil { + item.ConnectionInfo = &api.ConnectionInfo{ + ClientIP: tcpID.DstIP, + ClientPort: tcpID.DstPort, + ServerIP: tcpID.SrcIP, + ServerPort: tcpID.SrcPort, + IsOutgoing: false, + } + } + } + + if item != nil { + item.Protocol = http2Protocol + emitter.Emit(item) + } + + return nil +} + +func handleHTTP1ClientStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error { + req, err := http.ReadRequest(b) + if err != nil { + // log.Println("Error reading stream:", err) + return err + } + counterPair.Request++ + + body, err := ioutil.ReadAll(req.Body) + req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind + s := len(body) + if err != nil { + rlog.Debugf("[HTTP-request-body] stream %s Got body err: %s", tcpID.Ident, err) + } + if err := req.Body.Close(); err != nil { + rlog.Debugf("[HTTP-request-body-close] stream %s Failed to close request body: %s", tcpID.Ident, err) + } + encoding := req.Header["Content-Encoding"] + rlog.Tracef(1, "HTTP/1 Request: %s %s %s (Body:%d) -> %s", tcpID.Ident, req.Method, req.URL, s, encoding) + + ident := fmt.Sprintf( + "%s->%s %s->%s %d", + tcpID.SrcIP, + tcpID.DstIP, + tcpID.SrcPort, + tcpID.DstPort, + counterPair.Request, + ) + item := reqResMatcher.registerRequest(ident, req, time.Now()) + if item != nil { + item.ConnectionInfo = &api.ConnectionInfo{ + ClientIP: tcpID.SrcIP, + ClientPort: tcpID.SrcPort, + ServerIP: tcpID.DstIP, + ServerPort: tcpID.DstPort, + IsOutgoing: true, + } + emitter.Emit(item) + } + return nil +} + +func handleHTTP1ServerStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error { + res, err := http.ReadResponse(b, nil) + if err != nil { + // log.Println("Error reading stream:", err) + return err + } + counterPair.Response++ + + body, err := ioutil.ReadAll(res.Body) + res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind + s := len(body) + if err != nil { + rlog.Debugf("[HTTP-response-body] HTTP/%s: failed to get body(parsed len:%d): %s", tcpID.Ident, s, err) + } + if err := res.Body.Close(); err != nil { + rlog.Debugf("[HTTP-response-body-close] HTTP/%s: failed to close body(parsed len:%d): %s", tcpID.Ident, s, err) + } + sym := "," + if res.ContentLength > 0 && res.ContentLength != int64(s) { + sym = "!=" + } + contentType, ok := res.Header["Content-Type"] + if !ok { + contentType = []string{http.DetectContentType(body)} + } + encoding := res.Header["Content-Encoding"] + rlog.Tracef(1, "HTTP/1 
Response: %s %s (%d%s%d%s) -> %s", tcpID.Ident, res.Status, res.ContentLength, sym, s, contentType, encoding) + + ident := fmt.Sprintf( + "%s->%s %s->%s %d", + tcpID.DstIP, + tcpID.SrcIP, + tcpID.DstPort, + tcpID.SrcPort, + counterPair.Response, + ) + item := reqResMatcher.registerResponse(ident, res, time.Now()) + if item != nil { + item.ConnectionInfo = &api.ConnectionInfo{ + ClientIP: tcpID.DstIP, + ClientPort: tcpID.DstPort, + ServerIP: tcpID.SrcIP, + ServerPort: tcpID.SrcPort, + IsOutgoing: false, + } + emitter.Emit(item) + } + return nil +} diff --git a/tap/extensions/http/main.go b/tap/extensions/http/main.go new file mode 100644 index 000000000..033a17648 --- /dev/null +++ b/tap/extensions/http/main.go @@ -0,0 +1,384 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "log" + "net/url" + + "github.com/romana/rlog" + + "github.com/up9inc/mizu/tap/api" +) + +var protocol api.Protocol = api.Protocol{ + Name: "http", + LongName: "Hypertext Transfer Protocol -- HTTP/1.1", + Abbreviation: "HTTP", + Version: "1.1", + BackgroundColor: "#205cf5", + ForegroundColor: "#ffffff", + FontSize: 12, + ReferenceLink: "https://datatracker.ietf.org/doc/html/rfc2616", + Ports: []string{"80", "8080", "50051"}, + Priority: 0, +} + +var http2Protocol api.Protocol = api.Protocol{ + Name: "http", + LongName: "Hypertext Transfer Protocol Version 2 (HTTP/2) (gRPC)", + Abbreviation: "HTTP/2", + Version: "2.0", + BackgroundColor: "#244c5a", + ForegroundColor: "#ffffff", + FontSize: 11, + ReferenceLink: "https://datatracker.ietf.org/doc/html/rfc7540", + Ports: []string{"80", "8080"}, + Priority: 0, +} + +const ( + TypeHttpRequest = iota + TypeHttpResponse +) + +func init() { + log.Println("Initializing HTTP extension...") +} + +type dissecting string + +func (d dissecting) Register(extension *api.Extension) { + extension.Protocol = protocol + extension.MatcherMap = reqResMatcher.openMessagesMap +} + +func (d dissecting) Ping() { + log.Printf("pong %s\n", protocol.Name) +} + +func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error { + ident := fmt.Sprintf("%s->%s:%s->%s", tcpID.SrcIP, tcpID.DstIP, tcpID.SrcPort, tcpID.DstPort) + isHTTP2, err := checkIsHTTP2Connection(b, isClient) + if err != nil { + rlog.Debugf("[HTTP/2-Prepare-Connection] stream %s Failed to check if client is HTTP/2: %s (%v,%+v)", ident, err, err, err) + // Do something? 
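+ // In that case isHTTP2 presumably stays false and the loop below falls + // back to the HTTP/1 handlers.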
+ } + + var grpcAssembler *GrpcAssembler + if isHTTP2 { + err := prepareHTTP2Connection(b, isClient) + if err != nil { + rlog.Debugf("[HTTP/2-Prepare-Connection-After-Check] stream %s error: %s (%v,%+v)", ident, err, err, err) + } + grpcAssembler = createGrpcAssembler(b) + } + + success := false + for { + if isHTTP2 { + err = handleHTTP2Stream(grpcAssembler, tcpID, emitter) + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } else if err != nil { + rlog.Debugf("[HTTP/2] stream %s error: %s (%v,%+v)", ident, err, err, err) + continue + } + success = true + } else if isClient { + err = handleHTTP1ClientStream(b, tcpID, counterPair, emitter) + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } else if err != nil { + rlog.Debugf("[HTTP-request] stream %s Request error: %s (%v,%+v)", ident, err, err, err) + continue + } + success = true + } else { + err = handleHTTP1ServerStream(b, tcpID, counterPair, emitter) + if err == io.EOF || err == io.ErrUnexpectedEOF { + break + } else if err != nil { + rlog.Debugf("[HTTP-response], stream %s Response error: %s (%v,%+v)", ident, err, err, err) + continue + } + success = true + } + } + + if !success { + return err + } + return nil +} + +func SetHostname(address, newHostname string) string { + replacedUrl, err := url.Parse(address) + if err != nil { + log.Printf("error replacing hostname to %s in address %s, returning original %v", newHostname, address, err) + return address + } + replacedUrl.Host = newHostname + return replacedUrl.String() +} + +func (d dissecting) Analyze(item *api.OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *api.MizuEntry { + var host, scheme, authority, path, service string + + request := item.Pair.Request.Payload.(map[string]interface{}) + response := item.Pair.Response.Payload.(map[string]interface{}) + reqDetails := request["details"].(map[string]interface{}) + resDetails := response["details"].(map[string]interface{}) + + for _, header := range reqDetails["headers"].([]interface{}) { + h := header.(map[string]interface{}) + if h["name"] == "Host" { + host = h["value"].(string) + } + if h["name"] == ":authority" { + authority = h["value"].(string) + } + if h["name"] == ":scheme" { + scheme = h["value"].(string) + } + if h["name"] == ":path" { + path = h["value"].(string) + } + } + + if item.Protocol.Version == "2.0" { + service = fmt.Sprintf("%s://%s", scheme, authority) + } else { + service = fmt.Sprintf("http://%s", host) + path = reqDetails["url"].(string) + } + + request["url"] = path + if resolvedDestination != "" { + service = SetHostname(service, resolvedDestination) + } else if resolvedSource != "" { + service = SetHostname(service, resolvedSource) + } + entryBytes, _ := json.Marshal(item.Pair) + return &api.MizuEntry{ + ProtocolName: protocol.Name, + ProtocolVersion: item.Protocol.Version, + EntryId: entryId, + Entry: string(entryBytes), + Url: fmt.Sprintf("%s%s", service, path), + Method: reqDetails["method"].(string), + Status: int(resDetails["status"].(float64)), + RequestSenderIp: item.ConnectionInfo.ClientIP, + Service: service, + Timestamp: item.Timestamp, + Path: path, + ResolvedSource: resolvedSource, + ResolvedDestination: resolvedDestination, + SourceIp: item.ConnectionInfo.ClientIP, + DestinationIp: item.ConnectionInfo.ServerIP, + SourcePort: item.ConnectionInfo.ClientPort, + DestinationPort: item.ConnectionInfo.ServerPort, + IsOutgoing: item.ConnectionInfo.IsOutgoing, + } +} + +func (d dissecting) Summarize(entry *api.MizuEntry) *api.BaseEntryDetails 
{ + var p api.Protocol + if entry.ProtocolVersion == "2.0" { + p = http2Protocol + } else { + p = protocol + } + return &api.BaseEntryDetails{ + Id: entry.EntryId, + Protocol: p, + Url: entry.Url, + RequestSenderIp: entry.RequestSenderIp, + Service: entry.Service, + Summary: entry.Path, + StatusCode: entry.Status, + Method: entry.Method, + Timestamp: entry.Timestamp, + SourceIp: entry.SourceIp, + DestinationIp: entry.DestinationIp, + SourcePort: entry.SourcePort, + DestinationPort: entry.DestinationPort, + IsOutgoing: entry.IsOutgoing, + Latency: 0, + Rules: api.ApplicableRules{ + Latency: 0, + Status: false, + }, + } +} + +func representRequest(request map[string]interface{}) []interface{} { + repRequest := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Method", + "value": request["method"].(string), + }, + { + "name": "URL", + "value": request["url"].(string), + }, + { + "name": "Body Size", + "value": fmt.Sprintf("%g bytes", request["bodySize"].(float64)), + }, + }) + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + headers, _ := json.Marshal(request["headers"].([]interface{})) + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "Headers", + "data": string(headers), + }) + + cookies, _ := json.Marshal(request["cookies"].([]interface{})) + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "Cookies", + "data": string(cookies), + }) + + queryString, _ := json.Marshal(request["queryString"].([]interface{})) + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "Query String", + "data": string(queryString), + }) + + postData, _ := request["postData"].(map[string]interface{}) + mimeType, _ := postData["mimeType"] + if mimeType == nil || len(mimeType.(string)) == 0 { + mimeType = "text/html" + } + text, _ := postData["text"] + if text != nil { + repRequest = append(repRequest, map[string]string{ + "type": "body", + "title": "POST Data (text/plain)", + "encoding": "", + "mime_type": mimeType.(string), + "data": text.(string), + }) + } + + if postData["params"] != nil { + params, _ := json.Marshal(postData["params"].([]interface{})) + if len(params) > 0 { + if mimeType == "multipart/form-data" { + multipart, _ := json.Marshal([]map[string]string{ + { + "name": "Files", + "value": string(params), + }, + }) + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "POST Data (multipart/form-data)", + "data": string(multipart), + }) + } else { + repRequest = append(repRequest, map[string]string{ + "type": "table", + "title": "POST Data (application/x-www-form-urlencoded)", + "data": string(params), + }) + } + } + } + + return repRequest +} + +func representResponse(response map[string]interface{}) []interface{} { + repResponse := make([]interface{}, 0) + + details, _ := json.Marshal([]map[string]string{ + { + "name": "Status", + "value": fmt.Sprintf("%g", response["status"].(float64)), + }, + { + "name": "Status Text", + "value": response["statusText"].(string), + }, + { + "name": "Body Size", + "value": fmt.Sprintf("%g bytes", response["bodySize"].(float64)), + }, + }) + repResponse = append(repResponse, map[string]string{ + "type": "table", + "title": "Details", + "data": string(details), + }) + + headers, _ := json.Marshal(response["headers"].([]interface{})) + repResponse = append(repResponse, map[string]string{ + "type": "table", + "title": "Headers", + 
"data": string(headers), + }) + + cookies, _ := json.Marshal(response["cookies"].([]interface{})) + repResponse = append(repResponse, map[string]string{ + "type": "table", + "title": "Cookies", + "data": string(cookies), + }) + + content, _ := response["content"].(map[string]interface{}) + mimeType, _ := content["mimeType"] + if mimeType == nil || len(mimeType.(string)) == 0 { + mimeType = "text/html" + } + encoding, _ := content["encoding"] + text, _ := content["text"] + if text != nil { + repResponse = append(repResponse, map[string]string{ + "type": "body", + "title": "Body", + "encoding": encoding.(string), + "mime_type": mimeType.(string), + "data": text.(string), + }) + } + + return repResponse +} + +func (d dissecting) Represent(entry *api.MizuEntry) (api.Protocol, []byte, error) { + var p api.Protocol + if entry.ProtocolVersion == "2.0" { + p = http2Protocol + } else { + p = protocol + } + var root map[string]interface{} + json.Unmarshal([]byte(entry.Entry), &root) + representation := make(map[string]interface{}, 0) + request := root["request"].(map[string]interface{})["payload"].(map[string]interface{}) + response := root["response"].(map[string]interface{})["payload"].(map[string]interface{}) + reqDetails := request["details"].(map[string]interface{}) + resDetails := response["details"].(map[string]interface{}) + repRequest := representRequest(reqDetails) + repResponse := representResponse(resDetails) + representation["request"] = repRequest + representation["response"] = repResponse + object, err := json.Marshal(representation) + return p, object, err +} + +var Dissector dissecting diff --git a/tap/extensions/http/matcher.go b/tap/extensions/http/matcher.go new file mode 100644 index 000000000..9692b5461 --- /dev/null +++ b/tap/extensions/http/matcher.go @@ -0,0 +1,105 @@ +package main + +import ( + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/romana/rlog" + + "github.com/up9inc/mizu/tap/api" +) + +var reqResMatcher = createResponseRequestMatcher() // global + +// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}_{incremental_counter} +type requestResponseMatcher struct { + openMessagesMap *sync.Map +} + +func createResponseRequestMatcher() requestResponseMatcher { + newMatcher := &requestResponseMatcher{openMessagesMap: &sync.Map{}} + return *newMatcher +} + +func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time) *api.OutputChannelItem { + split := splitIdent(ident) + key := genKey(split) + + requestHTTPMessage := api.GenericMessage{ + IsRequest: true, + CaptureTime: captureTime, + Payload: HTTPPayload{ + Type: TypeHttpRequest, + Data: request, + }, + } + + if response, found := matcher.openMessagesMap.LoadAndDelete(key); found { + // Type assertion always succeeds because all of the map's values are of api.GenericMessage type + responseHTTPMessage := response.(*api.GenericMessage) + if responseHTTPMessage.IsRequest { + rlog.Debugf("[Request-Duplicate] Got duplicate request with same identifier") + return nil + } + rlog.Tracef(1, "Matched open Response for %s", key) + return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage) + } + + matcher.openMessagesMap.Store(key, &requestHTTPMessage) + rlog.Tracef(1, "Registered open Request for %s", key) + return nil +} + +func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time) *api.OutputChannelItem { + split := splitIdent(ident) + key := genKey(split) + + responseHTTPMessage := 
api.GenericMessage{ + IsRequest: false, + CaptureTime: captureTime, + Payload: HTTPPayload{ + Type: TypeHttpResponse, + Data: response, + }, + } + + if request, found := matcher.openMessagesMap.LoadAndDelete(key); found { + // Type assertion always succeeds because all of the map's values are of *api.GenericMessage type + requestHTTPMessage := request.(*api.GenericMessage) + if !requestHTTPMessage.IsRequest { + rlog.Debugf("[Response-Duplicate] Got duplicate response with same identifier") + return nil + } + rlog.Tracef(1, "Matched open Request for %s", key) + return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage) + } + + matcher.openMessagesMap.Store(key, &responseHTTPMessage) + rlog.Tracef(1, "Registered open Response for %s", key) + return nil +} + +func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *api.GenericMessage, responseHTTPMessage *api.GenericMessage) *api.OutputChannelItem { + return &api.OutputChannelItem{ + Protocol: protocol, + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + ConnectionInfo: nil, + Pair: &api.RequestResponsePair{ + Request: *requestHTTPMessage, + Response: *responseHTTPMessage, + }, + } +} + +func splitIdent(ident string) []string { + ident = strings.Replace(ident, "->", " ", -1) + return strings.Split(ident, " ") +} + +func genKey(split []string) string { + key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4]) + return key +} diff --git a/tap/extensions/http/structs.go b/tap/extensions/http/structs.go new file mode 100644 index 000000000..6ec868832 --- /dev/null +++ b/tap/extensions/http/structs.go @@ -0,0 +1,55 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/google/martian/har" + "github.com/romana/rlog" +) + +type HTTPPayload struct { + Type uint8 + Data interface{} +} + +type HTTPPayloader interface { + MarshalJSON() ([]byte, error) +} + +type HTTPWrapper struct { + Method string `json:"method"` + Url string `json:"url"` + Details interface{} `json:"details"` +} + +func (h HTTPPayload) MarshalJSON() ([]byte, error) { + switch h.Type { + case TypeHttpRequest: + harRequest, err := har.NewRequest(h.Data.(*http.Request), true) + if err != nil { + rlog.Debugf("[convert-request-to-har] Failed converting request to HAR %s (%v,%+v)", err, err, err) + return nil, errors.New("Failed converting request to HAR") + } + return json.Marshal(&HTTPWrapper{ + Method: harRequest.Method, + Url: "", + Details: harRequest, + }) + case TypeHttpResponse: + harResponse, err := har.NewResponse(h.Data.(*http.Response), true) + if err != nil { + rlog.Debugf("[convert-response-to-har] Failed converting response to HAR %s (%v,%+v)", err, err, err) + return nil, errors.New("Failed converting response to HAR") + } + return json.Marshal(&HTTPWrapper{ + Method: "", + Url: "", + Details: harResponse, + }) + default: + panic(fmt.Sprintf("HTTP payload cannot be marshaled: %d\n", h.Type)) + } +} diff --git a/tap/extensions/kafka/buffer.go b/tap/extensions/kafka/buffer.go new file mode 100644 index 000000000..d57d2c96c --- /dev/null +++ b/tap/extensions/kafka/buffer.go @@ -0,0 +1,645 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "math" + "sync" + "sync/atomic" +) + +// Bytes is an interface implemented by types that represent immutable +// sequences of bytes. +// +// Bytes values are used to abstract the location where record keys and +// values are read from (e.g. in-memory buffers, network sockets, files).
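+// For instance, a dissector can wrap a decoded record value with NewBytes +// (defined below) and hand it to any consumer that expects an io.ReadCloser.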
+// +// The Close method should be called to release resources held by the object +// when the program is done with it. +// +// Bytes values are generally not safe to use concurrently from multiple +// goroutines. +type Bytes interface { + io.ReadCloser + // Returns the number of bytes remaining to be read from the payload. + Len() int +} + +// NewBytes constructs a Bytes value from b. +// +// The returned value references b, it does not make a copy of the backing +// array. +// +// If b is nil, nil is returned to represent a null BYTES value in the kafka +// protocol. +func NewBytes(b []byte) Bytes { + if b == nil { + return nil + } + r := new(bytesReader) + r.Reset(b) + return r +} + +// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the +// length of b to minimize the memory footprint. +// +// The function returns a nil slice if b is nil. +// func ReadAll(b Bytes) ([]byte, error) { +// if b == nil { +// return nil, nil +// } +// s := make([]byte, b.Len()) +// _, err := io.ReadFull(b, s) +// return s, err +// } + +type bytesReader struct{ bytes.Reader } + +func (*bytesReader) Close() error { return nil } + +type refCount uintptr + +func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) } + +func (rc *refCount) unref(onZero func()) { + if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 { + onZero() + } +} + +const ( + // Size of the memory buffer for a single page. We use a fairly + // large size here (64 KiB) because batches exchanged with kafka + // tend to be multiple kilobytes in size, sometimes hundreds. + // Using large pages amortizes the overhead of the page metadata + // and algorithms to manage the pages. + pageSize = 65536 +) + +type page struct { + refc refCount + offset int64 + length int + buffer *[pageSize]byte +} + +func newPage(offset int64) *page { + p, _ := pagePool.Get().(*page) + if p != nil { + p.offset = offset + p.length = 0 + p.ref() + } else { + p = &page{ + refc: 1, + offset: offset, + buffer: &[pageSize]byte{}, + } + } + return p +} + +func (p *page) ref() { p.refc.ref() } + +func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) } + +func (p *page) slice(begin, end int64) []byte { + i, j := begin-p.offset, end-p.offset + + if i < 0 { + i = 0 + } else if i > pageSize { + i = pageSize + } + + if j < 0 { + j = 0 + } else if j > pageSize { + j = pageSize + } + + if i < j { + return p.buffer[i:j] + } + + return nil +} + +func (p *page) Cap() int { return pageSize } + +func (p *page) Len() int { return p.length } + +func (p *page) Size() int64 { return int64(p.length) } + +func (p *page) Truncate(n int) { + if n < p.length { + p.length = n + } +} + +func (p *page) ReadAt(b []byte, off int64) (int, error) { + if off -= p.offset; off < 0 || off > pageSize { + panic("offset out of range") + } + if off > int64(p.length) { + return 0, nil + } + return copy(b, p.buffer[off:p.length]), nil +} + +func (p *page) ReadFrom(r io.Reader) (int64, error) { + n, err := io.ReadFull(r, p.buffer[p.length:]) + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = nil + } + p.length += n + return int64(n), err +} + +func (p *page) WriteAt(b []byte, off int64) (int, error) { + if off -= p.offset; off < 0 || off > pageSize { + panic("offset out of range") + } + n := copy(p.buffer[off:], b) + if end := int(off) + n; end > p.length { + p.length = end + } + return n, nil +} + +func (p *page) Write(b []byte) (int, error) { + return p.WriteAt(b, p.offset+int64(p.length)) +} + +var ( + _ io.ReaderAt = (*page)(nil) + _ io.ReaderFrom =
(*page)(nil) + _ io.Writer = (*page)(nil) + _ io.WriterAt = (*page)(nil) +) + +type pageBuffer struct { + refc refCount + pages contiguousPages + length int + cursor int +} + +func newPageBuffer() *pageBuffer { + b, _ := pageBufferPool.Get().(*pageBuffer) + if b != nil { + b.cursor = 0 + b.refc.ref() + } else { + b = &pageBuffer{ + refc: 1, + pages: make(contiguousPages, 0, 16), + } + } + return b +} + +func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) { + length := end - begin + + if length > math.MaxUint32 { + panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB") + } + + ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...) + ref.pages.ref() + ref.offset = begin + ref.length = uint32(length) +} + +func (pb *pageBuffer) ref(begin, end int64) *pageRef { + ref := new(pageRef) + pb.refTo(ref, begin, end) + return ref +} + +func (pb *pageBuffer) unref() { + pb.refc.unref(func() { + pb.pages.unref() + pb.pages.clear() + pb.pages = pb.pages[:0] + pb.length = 0 + pageBufferPool.Put(pb) + }) +} + +func (pb *pageBuffer) newPage() *page { + return newPage(int64(pb.length)) +} + +func (pb *pageBuffer) Close() error { + return nil +} + +func (pb *pageBuffer) Len() int { + return pb.length - pb.cursor +} + +func (pb *pageBuffer) Size() int64 { + return int64(pb.length) +} + +func (pb *pageBuffer) Discard(n int) (int, error) { + remain := pb.length - pb.cursor + if remain < n { + n = remain + } + pb.cursor += n + return n, nil +} + +func (pb *pageBuffer) Truncate(n int) { + if n < pb.length { + pb.length = n + + if n < pb.cursor { + pb.cursor = n + } + + for i := range pb.pages { + if p := pb.pages[i]; p.length <= n { + n -= p.length + } else { + if n > 0 { + pb.pages[i].Truncate(n) + i++ + } + pb.pages[i:].unref() + pb.pages[i:].clear() + pb.pages = pb.pages[:i] + break + } + } + } +} + +func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) { + c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence) + if err != nil { + return -1, err + } + pb.cursor = int(c) + return c, nil +} + +func (pb *pageBuffer) ReadByte() (byte, error) { + b := [1]byte{} + _, err := pb.Read(b[:]) + return b[0], err +} + +func (pb *pageBuffer) Read(b []byte) (int, error) { + if pb.cursor >= pb.length { + return 0, io.EOF + } + n, err := pb.ReadAt(b, int64(pb.cursor)) + pb.cursor += n + return n, err +} + +func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) { + return pb.pages.ReadAt(b, off) +} + +func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) { + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + rn := int64(0) + + for { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if free == 0 { + tail = pb.newPage() + free = pageSize + pb.pages = append(pb.pages, tail) + } + + n, err := tail.ReadFrom(r) + pb.length += int(n) + rn += n + if n < int64(free) { + return rn, err + } + } +} + +func (pb *pageBuffer) WriteString(s string) (int, error) { + return pb.Write([]byte(s)) +} + +func (pb *pageBuffer) Write(b []byte) (int, error) { + wn := len(b) + if wn == 0 { + return 0, nil + } + + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + for len(b) != 0 { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if len(b) <= free { + tail.Write(b) + pb.length += len(b) + break + } + + tail.Write(b[:free]) + b = b[free:] + + pb.length += free + pb.pages = append(pb.pages, pb.newPage()) + } + + return wn, nil +} + +func (pb *pageBuffer) WriteAt(b []byte, off 
int64) (int, error) { + n, err := pb.pages.WriteAt(b, off) + if err != nil { + return n, err + } + if n < len(b) { + pb.Write(b[n:]) + } + return len(b), nil +} + +func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) { + var wn int + var err error + pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += n + return err == nil + }) + pb.cursor += wn + return int64(wn), err +} + +var ( + _ io.ReaderAt = (*pageBuffer)(nil) + _ io.ReaderFrom = (*pageBuffer)(nil) + _ io.StringWriter = (*pageBuffer)(nil) + _ io.Writer = (*pageBuffer)(nil) + _ io.WriterAt = (*pageBuffer)(nil) + _ io.WriterTo = (*pageBuffer)(nil) + + pagePool sync.Pool + pageBufferPool sync.Pool +) + +type contiguousPages []*page + +func (pages contiguousPages) ref() { + for _, p := range pages { + p.ref() + } +} + +func (pages contiguousPages) unref() { + for _, p := range pages { + p.unref() + } +} + +func (pages contiguousPages) clear() { + for i := range pages { + pages[i] = nil + } +} + +func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) { + rn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.ReadAt(b, off) + b = b[n:] + rn += n + off += int64(n) + } + + return rn, nil +} + +func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) { + wn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.WriteAt(b, off) + b = b[n:] + wn += n + off += int64(n) + } + + return wn, nil +} + +func (pages contiguousPages) slice(begin, end int64) contiguousPages { + i := pages.indexOf(begin) + j := pages.indexOf(end) + if j < len(pages) { + j++ + } + return pages[i:j] +} + +func (pages contiguousPages) indexOf(offset int64) int { + if len(pages) == 0 { + return 0 + } + return int((offset - pages[0].offset) / pageSize) +} + +func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) { + for _, p := range pages.slice(begin, end) { + if !f(p.slice(begin, end)) { + break + } + } +} + +var ( + _ io.ReaderAt = contiguousPages{} + _ io.WriterAt = contiguousPages{} +) + +type pageRef struct { + buffer [2]*page + pages contiguousPages + offset int64 + cursor int64 + length uint32 + once uint32 +} + +func (ref *pageRef) unref() { + if atomic.CompareAndSwapUint32(&ref.once, 0, 1) { + ref.pages.unref() + ref.pages.clear() + ref.pages = nil + ref.offset = 0 + ref.cursor = 0 + ref.length = 0 + } +} + +func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) } + +func (ref *pageRef) Size() int64 { return int64(ref.length) } + +func (ref *pageRef) Close() error { ref.unref(); return nil } + +func (ref *pageRef) String() string { + return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length) +} + +func (ref *pageRef) Seek(offset int64, whence int) (int64, error) { + c, err := seek(ref.cursor, int64(ref.length), offset, whence) + if err != nil { + return -1, err + } + ref.cursor = c + return c, nil +} + +func (ref *pageRef) ReadByte() (byte, error) { + var c byte + var ok bool + ref.scan(ref.cursor, func(b []byte) bool { + c, ok = b[0], true + return false + }) + if ok { + ref.cursor++ + } else { + return 0, io.EOF + } + return c, nil +} + +func (ref *pageRef) Read(b []byte) (int, error) { + if ref.cursor >= int64(ref.length) { + return 0, io.EOF + } + n, err := ref.ReadAt(b, ref.cursor) + ref.cursor += int64(n) + return n, err +} + +func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) { + limit := ref.offset + int64(ref.length) + off += ref.offset + + if off 
>= limit { + return 0, io.EOF + } + + if off+int64(len(b)) > limit { + b = b[:limit-off] + } + + if len(b) == 0 { + return 0, nil + } + + n, err := ref.pages.ReadAt(b, off) + if n == 0 && err == nil { + err = io.EOF + } + return n, err +} + +func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) { + ref.scan(ref.cursor, func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += int64(n) + return err == nil + }) + ref.cursor += wn + return +} + +func (ref *pageRef) scan(off int64, f func([]byte) bool) { + begin := ref.offset + off + end := ref.offset + int64(ref.length) + ref.pages.scan(begin, end, f) +} + +var ( + _ io.Closer = (*pageRef)(nil) + _ io.Seeker = (*pageRef)(nil) + _ io.Reader = (*pageRef)(nil) + _ io.ReaderAt = (*pageRef)(nil) + _ io.WriterTo = (*pageRef)(nil) +) + +type pageRefAllocator struct { + refs []pageRef + head int + size int +} + +func (a *pageRefAllocator) newPageRef() *pageRef { + if a.head == len(a.refs) { + a.refs = make([]pageRef, a.size) + a.head = 0 + } + ref := &a.refs[a.head] + a.head++ + return ref +} + +func unref(x interface{}) { + if r, _ := x.(interface{ unref() }); r != nil { + r.unref() + } +} + +func seek(cursor, limit, offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + // absolute offset + case io.SeekCurrent: + offset = cursor + offset + case io.SeekEnd: + offset = limit - offset + default: + return -1, fmt.Errorf("seek: invalid whence value: %d", whence) + } + if offset < 0 { + offset = 0 + } + if offset > limit { + offset = limit + } + return offset, nil +} + +func closeBytes(b Bytes) { + if b != nil { + b.Close() + } +} + +func resetBytes(b Bytes) { + if r, _ := b.(interface{ Reset() }); r != nil { + r.Reset() + } +} diff --git a/tap/extensions/kafka/cluster.go b/tap/extensions/kafka/cluster.go new file mode 100644 index 000000000..a1e693581 --- /dev/null +++ b/tap/extensions/kafka/cluster.go @@ -0,0 +1,143 @@ +package main + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" +) + +type Cluster struct { + ClusterID string + Controller int32 + Brokers map[int32]Broker + Topics map[string]Topic +} + +func (c Cluster) BrokerIDs() []int32 { + brokerIDs := make([]int32, 0, len(c.Brokers)) + for id := range c.Brokers { + brokerIDs = append(brokerIDs, id) + } + sort.Slice(brokerIDs, func(i, j int) bool { + return brokerIDs[i] < brokerIDs[j] + }) + return brokerIDs +} + +func (c Cluster) TopicNames() []string { + topicNames := make([]string, 0, len(c.Topics)) + for name := range c.Topics { + topicNames = append(topicNames, name) + } + sort.Strings(topicNames) + return topicNames +} + +func (c Cluster) IsZero() bool { + return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0 +} + +func (c Cluster) Format(w fmt.State, _ rune) { + tw := new(tabwriter.Writer) + fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n") + + for _, id := range c.BrokerIDs() { + broker := c.Brokers[id] + fmt.Fprintf(tw, " %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller) + } + + tw.Flush() + fmt.Fprintln(w) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " TOPIC\tPARTITIONS\tBROKERS\n") + topicNames := c.TopicNames() + brokers := make(map[int32]struct{}, len(c.Brokers)) + brokerIDs := make([]int32, 0, len(c.Brokers)) + + for _, name := range topicNames { + topic := c.Topics[name] + + for _, p := range topic.Partitions { + for _, id := range p.Replicas { + 
brokers[id] = struct{}{} + } + } + + for id := range brokers { + brokerIDs = append(brokerIDs, id) + } + + fmt.Fprintf(tw, " %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1)) + + for id := range brokers { + delete(brokers, id) + } + + brokerIDs = brokerIDs[:0] + } + + tw.Flush() + fmt.Fprintln(w) + + if w.Flag('+') { + for _, name := range topicNames { + fmt.Fprintf(w, " TOPIC: %q\n\n", name) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " PARTITION\tREPLICAS\tISR\tOFFLINE\n") + + for _, p := range c.Topics[name].Partitions { + fmt.Fprintf(tw, " %d\t%s\t%s\t%s\n", p.ID, + formatBrokerIDs(p.Replicas, -1), + formatBrokerIDs(p.ISR, p.Leader), + formatBrokerIDs(p.Offline, -1), + ) + } + + tw.Flush() + fmt.Fprintln(w) + } + } +} + +func formatBrokerIDs(brokerIDs []int32, leader int32) string { + if len(brokerIDs) == 0 { + return "" + } + + if len(brokerIDs) == 1 { + return itoa(brokerIDs[0]) + } + + sort.Slice(brokerIDs, func(i, j int) bool { + id1 := brokerIDs[i] + id2 := brokerIDs[j] + + if id1 == leader { + return true + } + + if id2 == leader { + return false + } + + return id1 < id2 + }) + + brokerNames := make([]string, len(brokerIDs)) + + for i, id := range brokerIDs { + brokerNames[i] = itoa(id) + } + + return strings.Join(brokerNames, ",") +} + +var ( + _ fmt.Formatter = Cluster{} +) diff --git a/tap/extensions/kafka/compression.go b/tap/extensions/kafka/compression.go new file mode 100644 index 000000000..e5b0485b8 --- /dev/null +++ b/tap/extensions/kafka/compression.go @@ -0,0 +1,30 @@ +package main + +import ( + "errors" + + "github.com/segmentio/kafka-go/compress" +) + +type Compression = compress.Compression + +type CompressionCodec = compress.Codec + +// TODO: this file should probably go away once the internals of the package +// have moved to use the protocol package. 
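+// Illustrative usage (an editorial sketch, not code from this change): a
+// record batch carries its compression codec in the low 3 bits of its
+// attributes field, so a caller would typically resolve it like this, where
+// `attributes` stands for the batch's attributes byte and the codec is
+// assumed to implement the segmentio/kafka-go compress.Codec interface:
+//
+//	codec, err := resolveCodec(attributes & compressionCodecMask)
+//	if err != nil {
+//		return err // unknown codec, or its package was not imported
+//	}
+//	decompressed := codec.NewReader(compressedInput)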
+const ( + compressionCodecMask = 0x07 +) + +var ( + errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported") +) + +// resolveCodec looks up a codec by Code() +func resolveCodec(code int8) (CompressionCodec, error) { + codec := compress.Compression(code).Codec() + if codec == nil { + return nil, errUnknownCodec + } + return codec, nil +} diff --git a/tap/extensions/kafka/decode.go b/tap/extensions/kafka/decode.go new file mode 100644 index 000000000..04a083a03 --- /dev/null +++ b/tap/extensions/kafka/decode.go @@ -0,0 +1,598 @@ +package main + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type discarder interface { + Discard(int) (int, error) +} + +type decoder struct { + reader io.Reader + remain int + buffer [8]byte + err error + table *crc32.Table + crc32 uint32 +} + +func (d *decoder) Reset(r io.Reader, n int) { + d.reader = r + d.remain = n + d.buffer = [8]byte{} + d.err = nil + d.table = nil + d.crc32 = 0 +} + +func (d *decoder) Read(b []byte) (int, error) { + if d.err != nil { + return 0, d.err + } + if d.remain == 0 { + return 0, io.EOF + } + if len(b) > d.remain { + b = b[:d.remain] + } + n, err := d.reader.Read(b) + if n > 0 && d.table != nil { + d.crc32 = crc32.Update(d.crc32, d.table, b[:n]) + } + d.remain -= n + return n, err +} + +func (d *decoder) ReadByte() (byte, error) { + c := d.readByte() + return c, d.err +} + +func (d *decoder) done() bool { + return d.remain == 0 || d.err != nil +} + +func (d *decoder) setCRC(table *crc32.Table) { + d.table, d.crc32 = table, 0 +} + +func (d *decoder) decodeBool(v value) { + v.setBool(d.readBool()) +} + +func (d *decoder) decodeInt8(v value) { + v.setInt8(d.readInt8()) +} + +func (d *decoder) decodeInt16(v value) { + v.setInt16(d.readInt16()) +} + +func (d *decoder) decodeInt32(v value) { + v.setInt32(d.readInt32()) +} + +func (d *decoder) decodeInt64(v value) { + v.setInt64(d.readInt64()) +} + +func (d *decoder) decodeString(v value) { + v.setString(d.readString()) +} + +func (d *decoder) decodeCompactString(v value) { + v.setString(d.readCompactString()) +} + +func (d *decoder) decodeBytes(v value) { + v.setBytes(d.readBytes()) +} + +func (d *decoder) decodeCompactBytes(v value) { + v.setBytes(d.readCompactBytes()) +} + +func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readInt32(); n < 0 || n > 65535 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n)) + for i := 0; i < int(n) && d.remain > 0; i++ { + decodeElem(d, a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readUnsignedVarInt(); n < 1 || n > 65535 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n-1)) + for i := 0; i < int(n-1) && d.remain > 0; i++ { + decodeElem(d, a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) decodeRecordV0(v value) { + x := &RecordV0{} + x.Unknown = d.readInt8() + x.Attributes = d.readInt8() + x.TimestampDelta = d.readInt8() + x.OffsetDelta = d.readInt8() + + x.KeyLength = int8(d.readVarInt()) + key := strings.Builder{} + for i := 0; i < int(x.KeyLength); i++ { + key.WriteString(fmt.Sprintf("%c", d.readInt8())) + } + x.Key = key.String() + + x.ValueLen = int8(d.readVarInt()) + value := strings.Builder{} + for i := 0; i < int(x.ValueLen); i++ { + value.WriteString(fmt.Sprintf("%c", d.readInt8())) + } + x.Value = value.String() + + 
headerLen := d.readInt8() / 2 + headers := make([]RecordHeader, 0) + for i := 0; i < int(headerLen); i++ { + header := &RecordHeader{} + + header.HeaderKeyLength = int8(d.readVarInt()) + headerKey := strings.Builder{} + for j := 0; j < int(header.HeaderKeyLength); j++ { + headerKey.WriteString(fmt.Sprintf("%c", d.readInt8())) + } + header.HeaderKey = headerKey.String() + + header.HeaderValueLength = int8(d.readVarInt()) + headerValue := strings.Builder{} + for j := 0; j < int(header.HeaderValueLength); j++ { + headerValue.WriteString(fmt.Sprintf("%c", d.readInt8())) + } + header.Value = headerValue.String() + + headers = append(headers, *header) + } + x.Headers = headers + + v.val.Set(valueOf(x).val) +} + +func (d *decoder) discardAll() { + d.discard(d.remain) +} + +func (d *decoder) discard(n int) { + if n > d.remain { + n = d.remain + } + var err error + if r, _ := d.reader.(discarder); r != nil { + n, err = r.Discard(n) + d.remain -= n + } else { + _, err = io.Copy(ioutil.Discard, d) + } + d.setError(err) +} + +func (d *decoder) read(n int) []byte { + b := make([]byte, n) + n, err := io.ReadFull(d, b) + b = b[:n] + d.setError(err) + return b +} + +func (d *decoder) writeTo(w io.Writer, n int) { + limit := d.remain + if n < limit { + d.remain = n + } + c, err := io.Copy(w, d) + if int(c) < n && err == nil { + err = io.ErrUnexpectedEOF + } + d.remain = limit - int(c) + d.setError(err) +} + +func (d *decoder) setError(err error) { + if d.err == nil && err != nil { + d.err = err + d.discardAll() + } +} + +func (d *decoder) readFull(b []byte) bool { + n, err := io.ReadFull(d, b) + d.setError(err) + return n == len(b) +} + +func (d *decoder) readByte() byte { + if d.readFull(d.buffer[:1]) { + return d.buffer[0] + } + return 0 +} + +func (d *decoder) readBool() bool { + return d.readByte() != 0 +} + +func (d *decoder) readInt8() int8 { + if d.readFull(d.buffer[:1]) { + return decodeReadInt8(d.buffer[:1]) + } + return 0 +} + +func (d *decoder) readInt16() int16 { + if d.readFull(d.buffer[:2]) { + return decodeReadInt16(d.buffer[:2]) + } + return 0 +} + +func (d *decoder) readInt32() int32 { + if d.readFull(d.buffer[:4]) { + return decodeReadInt32(d.buffer[:4]) + } + return 0 +} + +func (d *decoder) readInt64() int64 { + if d.readFull(d.buffer[:8]) { + return decodeReadInt64(d.buffer[:8]) + } + return 0 +} + +func (d *decoder) readString() string { + if n := d.readInt16(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readVarString() string { + if n := d.readVarInt(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readCompactString() string { + if n := d.readUnsignedVarInt(); n < 1 { + return "" + } else { + return bytesToString(d.read(int(n - 1))) + } +} + +func (d *decoder) readBytes() []byte { + if n := d.readInt32(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readBytesTo(w io.Writer) bool { + if n := d.readInt32(); n < 0 { + return false + } else { + d.writeTo(w, int(n)) + return d.err == nil + } +} + +func (d *decoder) readVarBytes() []byte { + if n := d.readVarInt(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readVarBytesTo(w io.Writer) bool { + if n := d.readVarInt(); n < 0 { + return false + } else { + d.writeTo(w, int(n)) + return d.err == nil + } +} + +func (d *decoder) readCompactBytes() []byte { + if n := d.readUnsignedVarInt(); n < 1 { + return nil + } else { + return d.read(int(n - 1)) + } +} + +func 
(d *decoder) readCompactBytesTo(w io.Writer) bool { + if n := d.readUnsignedVarInt(); n < 1 { + return false + } else { + d.writeTo(w, int(n-1)) + return d.err == nil + } +} + +func (d *decoder) readVarInt() int64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return int64(x>>1) ^ -(int64(x) & 1) + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode varint from input stream")) + return 0 +} + +func (d *decoder) readUnsignedVarInt() uint64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return x + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode unsigned varint from input stream")) + return 0 +} + +type decodeFunc func(*decoder, value) + +var ( + _ io.Reader = (*decoder)(nil) + _ io.ByteReader = (*decoder)(nil) + + readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem() +) + +func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + if reflect.PtrTo(typ).Implements(readerFrom) { + return readerDecodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*decoder).decodeBool + case reflect.Int8: + return (*decoder).decodeInt8 + case reflect.Int16: + return (*decoder).decodeInt16 + case reflect.Int32: + return (*decoder).decodeInt32 + case reflect.Int64: + return (*decoder).decodeInt64 + case reflect.String: + return stringDecodeFuncOf(flexible, tag) + case reflect.Struct: + return structDecodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesDecodeFuncOf(flexible, tag) + } + return arrayDecodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all strings are compact + return (*decoder).decodeCompactString + } + return (*decoder).decodeString +} + +func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all arrays are compact + return (*decoder).decodeCompactBytes + } + return (*decoder).decodeBytes +} + +func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc { + type field struct { + decode decodeFunc + index index + tagID int + } + + var fields []field + taggedFields := map[int]*field{} + + if typ == reflect.TypeOf(RecordV0{}) { + return (*decoder).decodeRecordV0 + } + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + decode: decodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields[tag.TagID] = &f + } + return false + } + return true + }) + }) + + return func(d *decoder, v value) { + for i := range fields { + f := &fields[i] + f.decode(d, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // 
for details of tag buffers in "flexible" messages. + n := int(d.readUnsignedVarInt()) + + for i := 0; i < n; i++ { + tagID := int(d.readUnsignedVarInt()) + size := int(d.readUnsignedVarInt()) + + f, ok := taggedFields[tagID] + if ok { + f.decode(d, v.fieldByIndex(f.index)) + } else { + d.read(size) + } + } + } + } +} + +func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + elemType := typ.Elem() + elemFunc := decodeFuncOf(elemType, version, flexible, tag) + if flexible { + // In flexible messages, all arrays are compact + return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) } + } + + return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) } +} + +func readerDecodeFuncOf(typ reflect.Type) decodeFunc { + typ = reflect.PtrTo(typ) + return func(d *decoder, v value) { + if d.err == nil { + _, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d) + if err != nil { + d.setError(err) + } + } + } +} + +func decodeReadInt8(b []byte) int8 { + return int8(b[0]) +} + +func decodeReadInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(b)) +} + +func decodeReadInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func decodeReadInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func Unmarshal(data []byte, version int16, value interface{}) error { + typ := elemTypeOf(value) + cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc) + key := versionedType{typ: typ, version: version} + decode := cache[key] + + if decode == nil { + decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]decodeFunc, len(cache)+1) + newCache[key] = decode + + for typ, fun := range cache { + newCache[typ] = fun + } + + unmarshalers.Store(newCache) + } + + d, _ := decoders.Get().(*decoder) + if d == nil { + d = &decoder{reader: bytes.NewReader(nil)} + } + + d.remain = len(data) + r, _ := d.reader.(*bytes.Reader) + r.Reset(data) + + defer func() { + r.Reset(nil) + d.Reset(r, 0) + decoders.Put(d) + }() + + decode(d, valueOf(value)) + return dontExpectEOF(d.err) +} + +var ( + decoders sync.Pool // *decoder + unmarshalers atomic.Value // map[versionedType]decodeFunc +) diff --git a/tap/extensions/kafka/discard.go b/tap/extensions/kafka/discard.go new file mode 100644 index 000000000..cff70c9b9 --- /dev/null +++ b/tap/extensions/kafka/discard.go @@ -0,0 +1,50 @@ +package main + +import "bufio" + +func discardN(r *bufio.Reader, sz int, n int) (int, error) { + var err error + if n <= sz { + n, err = r.Discard(n) + } else { + n, err = r.Discard(sz) + if err == nil { + err = errShortRead + } + } + return sz - n, err +} + +func discardInt8(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 1) +} + +func discardInt16(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 2) +} + +func discardInt32(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 4) +} + +func discardInt64(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 8) +} + +func discardString(r *bufio.Reader, sz int) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return sz, nil + } + return discardN(r, sz, n) + }) +} + +func discardBytes(r *bufio.Reader, sz int) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return 
sz, nil + } + return discardN(r, sz, n) + }) +} diff --git a/tap/extensions/kafka/encode.go b/tap/extensions/kafka/encode.go new file mode 100644 index 000000000..10d126994 --- /dev/null +++ b/tap/extensions/kafka/encode.go @@ -0,0 +1,645 @@ +package main + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "reflect" + "sync" + "sync/atomic" +) + +type encoder struct { + writer io.Writer + err error + table *crc32.Table + crc32 uint32 + buffer [32]byte +} + +type encoderChecksum struct { + reader io.Reader + encoder *encoder +} + +func (e *encoderChecksum) Read(b []byte) (int, error) { + n, err := e.reader.Read(b) + if n > 0 { + e.encoder.update(b[:n]) + } + return n, err +} + +func (e *encoder) Reset(w io.Writer) { + e.writer = w + e.err = nil + e.table = nil + e.crc32 = 0 + e.buffer = [32]byte{} +} + +func (e *encoder) ReadFrom(r io.Reader) (int64, error) { + if e.table != nil { + r = &encoderChecksum{ + reader: r, + encoder: e, + } + } + return io.Copy(e.writer, r) +} + +func (e *encoder) Write(b []byte) (int, error) { + if e.err != nil { + return 0, e.err + } + n, err := e.writer.Write(b) + if n > 0 { + e.update(b[:n]) + } + if err != nil { + e.err = err + } + return n, err +} + +func (e *encoder) WriteByte(b byte) error { + e.buffer[0] = b + _, err := e.Write(e.buffer[:1]) + return err +} + +func (e *encoder) WriteString(s string) (int, error) { + // This implementation is an optimization to avoid the heap allocation that + // would occur when converting the string to a []byte to call crc32.Update. + // + // Strings are rarely long in the kafka protocol, so the use of a 32 byte + // buffer is a good compromise between keeping the encoder value small and + // limiting the number of calls to Write. + // + // We introduced this optimization because memory profiles on the benchmarks + // showed that most heap allocations were caused by this code path. 
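+ // For example, writing a 70-byte string flushes through the 32-byte
+ // buffer in three Write calls of 32, 32 and 6 bytes, with no
+ // intermediate []byte allocation.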
+ n := 0 + + for len(s) != 0 { + c := copy(e.buffer[:], s) + w, err := e.Write(e.buffer[:c]) + n += w + if err != nil { + return n, err + } + s = s[c:] + } + + return n, nil +} + +func (e *encoder) setCRC(table *crc32.Table) { + e.table, e.crc32 = table, 0 +} + +func (e *encoder) update(b []byte) { + if e.table != nil { + e.crc32 = crc32.Update(e.crc32, e.table, b) + } +} + +func (e *encoder) encodeBool(v value) { + b := int8(0) + if v.bool() { + b = 1 + } + e.writeInt8(b) +} + +func (e *encoder) encodeInt8(v value) { + e.writeInt8(v.int8()) +} + +func (e *encoder) encodeInt16(v value) { + e.writeInt16(v.int16()) +} + +func (e *encoder) encodeInt32(v value) { + e.writeInt32(v.int32()) +} + +func (e *encoder) encodeInt64(v value) { + e.writeInt64(v.int64()) +} + +func (e *encoder) encodeString(v value) { + e.writeString(v.string()) +} + +func (e *encoder) encodeVarString(v value) { + e.writeVarString(v.string()) +} + +func (e *encoder) encodeCompactString(v value) { + e.writeCompactString(v.string()) +} + +func (e *encoder) encodeNullString(v value) { + e.writeNullString(v.string()) +} + +func (e *encoder) encodeVarNullString(v value) { + e.writeVarNullString(v.string()) +} + +func (e *encoder) encodeCompactNullString(v value) { + e.writeCompactNullString(v.string()) +} + +func (e *encoder) encodeBytes(v value) { + e.writeBytes(v.bytes()) +} + +func (e *encoder) encodeVarBytes(v value) { + e.writeVarBytes(v.bytes()) +} + +func (e *encoder) encodeCompactBytes(v value) { + e.writeCompactBytes(v.bytes()) +} + +func (e *encoder) encodeNullBytes(v value) { + e.writeNullBytes(v.bytes()) +} + +func (e *encoder) encodeVarNullBytes(v value) { + e.writeVarNullBytes(v.bytes()) +} + +func (e *encoder) encodeCompactNullBytes(v value) { + e.writeCompactNullBytes(v.bytes()) +} + +func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + e.writeUnsignedVarInt(uint64(n + 1)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeInt32(-1) + return + } + + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeUnsignedVarInt(0) + return + } + + n := a.length() + e.writeUnsignedVarInt(uint64(n + 1)) + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) writeInt8(i int8) { + writeInt8(e.buffer[:1], i) + e.Write(e.buffer[:1]) +} + +func (e *encoder) writeInt16(i int16) { + writeInt16(e.buffer[:2], i) + e.Write(e.buffer[:2]) +} + +func (e *encoder) writeInt32(i int32) { + writeInt32(e.buffer[:4], i) + e.Write(e.buffer[:4]) +} + +func (e *encoder) writeInt64(i int64) { + writeInt64(e.buffer[:8], i) + e.Write(e.buffer[:8]) +} + +func (e *encoder) writeString(s string) { + e.writeInt16(int16(len(s))) + e.WriteString(s) +} + +func (e *encoder) writeVarString(s string) { + e.writeVarInt(int64(len(s))) + e.WriteString(s) +} + +func (e *encoder) writeCompactString(s string) { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) +} + 
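+// Editorial summary of the three length-prefix flavors implemented above and
+// below (a description of these writers' behavior, not normative protocol
+// documentation):
+//
+//	writeString         big-endian int16 length, then the bytes
+//	writeVarString      zig-zag varint length, then the bytes
+//	writeCompactString  unsigned varint of len+1, then the bytes
+//
+// Hence an empty compact string encodes as the single byte 0x01, while the
+// compact null string (see writeCompactNullString below) encodes as 0x00.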
+func (e *encoder) writeNullString(s string) { + if s == "" { + e.writeInt16(-1) + } else { + e.writeInt16(int16(len(s))) + e.WriteString(s) + } +} + +func (e *encoder) writeVarNullString(s string) { + if s == "" { + e.writeVarInt(-1) + } else { + e.writeVarInt(int64(len(s))) + e.WriteString(s) + } +} + +func (e *encoder) writeCompactNullString(s string) { + if s == "" { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) + } +} + +func (e *encoder) writeBytes(b []byte) { + e.writeInt32(int32(len(b))) + e.Write(b) +} + +func (e *encoder) writeVarBytes(b []byte) { + e.writeVarInt(int64(len(b))) + e.Write(b) +} + +func (e *encoder) writeCompactBytes(b []byte) { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) +} + +func (e *encoder) writeNullBytes(b []byte) { + if b == nil { + e.writeInt32(-1) + } else { + e.writeInt32(int32(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeVarNullBytes(b []byte) { + if b == nil { + e.writeVarInt(-1) + } else { + e.writeVarInt(int64(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeCompactNullBytes(b []byte) { + if b == nil { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) + } +} + +func (e *encoder) writeBytesFrom(b Bytes) error { + size := int64(b.Len()) + e.writeInt32(int32(size)) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err +} + +func (e *encoder) writeNullBytesFrom(b Bytes) error { + if b == nil { + e.writeInt32(-1) + return nil + } else { + size := int64(b.Len()) + e.writeInt32(int32(size)) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarNullBytesFrom(b Bytes) error { + if b == nil { + e.writeVarInt(-1) + return nil + } else { + size := int64(b.Len()) + e.writeVarInt(size) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeCompactNullBytesFrom(b Bytes) error { + if b == nil { + e.writeUnsignedVarInt(0) + return nil + } else { + size := int64(b.Len()) + e.writeUnsignedVarInt(uint64(size + 1)) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of compact nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarInt(i int64) { + e.writeUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) +} + +func (e *encoder) writeUnsignedVarInt(i uint64) { + b := e.buffer[:] + n := 0 + + for i >= 0x80 && n < len(b) { + b[n] = byte(i) | 0x80 + i >>= 7 + n++ + } + + if n < len(b) { + b[n] = byte(i) + n++ + } + + e.Write(b[:n]) +} + +type encodeFunc func(*encoder, value) + +var ( + _ io.ReaderFrom = (*encoder)(nil) + _ io.Writer = (*encoder)(nil) + _ io.ByteWriter = (*encoder)(nil) + _ io.StringWriter = (*encoder)(nil) + + writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem() +) + +func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + if reflect.PtrTo(typ).Implements(writerTo) { + return 
writerEncodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*encoder).encodeBool + case reflect.Int8: + return (*encoder).encodeInt8 + case reflect.Int16: + return (*encoder).encodeInt16 + case reflect.Int32: + return (*encoder).encodeInt32 + case reflect.Int64: + return (*encoder).encodeInt64 + case reflect.String: + return stringEncodeFuncOf(flexible, tag) + case reflect.Struct: + return structEncodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesEncodeFuncOf(flexible, tag) + } + return arrayEncodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactNullString + case flexible: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactString + case tag.Nullable: + return (*encoder).encodeNullString + default: + return (*encoder).encodeString + } +} + +func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactNullBytes + case flexible: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactBytes + case tag.Nullable: + return (*encoder).encodeNullBytes + default: + return (*encoder).encodeBytes + } +} + +func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc { + type field struct { + encode encodeFunc + index index + tagID int + } + + var fields []field + var taggedFields []field + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + if typ.Size() != 0 { // skip struct{} + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + encode: encodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields = append(taggedFields, f) + } + return false + } + return true + }) + } + }) + + return func(e *encoder, v value) { + for i := range fields { + f := &fields[i] + f.encode(e, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details of tag buffers in "flexible" messages. 
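+ // The tag buffer written below consists of: the number of tagged
+ // fields as an unsigned varint, then for each field its tag ID
+ // (unsigned varint), the byte length of its encoded value (unsigned
+ // varint), and finally the encoded value itself.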
+ e.writeUnsignedVarInt(uint64(len(taggedFields))) + + for i := range taggedFields { + f := &taggedFields[i] + e.writeUnsignedVarInt(uint64(f.tagID)) + + buf := &bytes.Buffer{} + se := &encoder{writer: buf} + f.encode(se, v.fieldByIndex(f.index)) + e.writeUnsignedVarInt(uint64(buf.Len())) + e.Write(buf.Bytes()) + } + } + } +} + +func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + elemType := typ.Elem() + elemFunc := encodeFuncOf(elemType, version, flexible, tag) + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) } + case flexible: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) } + case tag.Nullable: + return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) } + default: + return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) } + } +} + +func writerEncodeFuncOf(typ reflect.Type) encodeFunc { + typ = reflect.PtrTo(typ) + return func(e *encoder, v value) { + // Optimization to write directly into the buffer when the encoder + // does not need to compute a crc32 checksum. + w := io.Writer(e) + if e.table == nil { + w = e.writer + } + _, err := v.iface(typ).(io.WriterTo).WriteTo(w) + if err != nil { + e.err = err + } + } +} + +func writeInt8(b []byte, i int8) { + b[0] = byte(i) +} + +func writeInt16(b []byte, i int16) { + binary.BigEndian.PutUint16(b, uint16(i)) +} + +func writeInt32(b []byte, i int32) { + binary.BigEndian.PutUint32(b, uint32(i)) +} + +func writeInt64(b []byte, i int64) { + binary.BigEndian.PutUint64(b, uint64(i)) +} + +func Marshal(version int16, value interface{}) ([]byte, error) { + typ := typeOf(value) + cache, _ := marshalers.Load().(map[versionedType]encodeFunc) + key := versionedType{typ: typ, version: version} + encode := cache[key] + + if encode == nil { + encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]encodeFunc, len(cache)+1) + newCache[key] = encode + + for typ, fun := range cache { + newCache[typ] = fun + } + + marshalers.Store(newCache) + } + + e, _ := encoders.Get().(*encoder) + if e == nil { + e = &encoder{writer: new(bytes.Buffer)} + } + + b, _ := e.writer.(*bytes.Buffer) + defer func() { + b.Reset() + e.Reset(b) + encoders.Put(e) + }() + + encode(e, nonAddressableValueOf(value)) + + if e.err != nil { + return nil, e.err + } + + buf := b.Bytes() + out := make([]byte, len(buf)) + copy(out, buf) + return out, nil +} + +type versionedType struct { + typ _type + version int16 +} + +var ( + encoders sync.Pool // *encoder + marshalers atomic.Value // map[versionedType]encodeFunc +) diff --git a/tap/extensions/kafka/error.go b/tap/extensions/kafka/error.go new file mode 100644 index 000000000..b5f53d8fb --- /dev/null +++ b/tap/extensions/kafka/error.go @@ -0,0 +1,91 @@ +package main + +import ( + "fmt" +) + +// Error represents client-side protocol errors. 
+type Error string + +func (e Error) Error() string { return string(e) } + +func Errorf(msg string, args ...interface{}) Error { + return Error(fmt.Sprintf(msg, args...)) +} + +const ( + // ErrNoTopic is returned when a request needs to be sent to a specific + // topic, but the client did not find it in the cluster metadata. + ErrNoTopic Error = "topic not found" + + // ErrNoPartition is returned when a request needs to be sent to a specific + // partition, but the client did not find it in the cluster metadata. + ErrNoPartition Error = "topic partition not found" + + // ErrNoLeader is returned when a request needs to be sent to a partition + // leader, but the client could not determine what the leader was at this + // time. + ErrNoLeader Error = "topic partition has no leader" + + // ErrNoRecord is returned when attempting to write a message containing an + // empty record set (which kafka forbids). + // + // We handle this case client-side because kafka will close the connection + // that it received an empty produce request on, causing all concurrent + // requests to be aborted. + ErrNoRecord Error = "record set contains no records" + + // ErrNoReset is returned by ResetRecordReader when the record reader does + // not support being reset. + ErrNoReset Error = "record sequence does not support reset" +) + +type TopicError struct { + Topic string + Err error +} + +func NewTopicError(topic string, err error) *TopicError { + return &TopicError{Topic: topic, Err: err} +} + +func NewErrNoTopic(topic string) *TopicError { + return NewTopicError(topic, ErrNoTopic) +} + +func (e *TopicError) Error() string { + return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic) +} + +func (e *TopicError) Unwrap() error { + return e.Err +} + +type TopicPartitionError struct { + Topic string + Partition int32 + Err error +} + +func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError { + return &TopicPartitionError{ + Topic: topic, + Partition: partition, + Err: err, + } +} + +func NewErrNoPartition(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoPartition) +} + +func NewErrNoLeader(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoLeader) +} + +func (e *TopicPartitionError) Error() string { + return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition) +} + +func (e *TopicPartitionError) Unwrap() error { + return e.Err +} diff --git a/tap/extensions/kafka/go.mod b/tap/extensions/kafka/go.mod new file mode 100644 index 000000000..113627f94 --- /dev/null +++ b/tap/extensions/kafka/go.mod @@ -0,0 +1,10 @@ +module github.com/up9inc/mizu/tap/extensions/kafka + +go 1.16 + +require ( + github.com/segmentio/kafka-go v0.4.17 + github.com/up9inc/mizu/tap/api v0.0.0 +) + +replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api diff --git a/tap/extensions/kafka/go.sum b/tap/extensions/kafka/go.sum new file mode 100644 index 000000000..32124810d --- /dev/null +++ b/tap/extensions/kafka/go.sum @@ -0,0 +1,35 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= 
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/segmentio/kafka-go v0.4.17 h1:IyqRstL9KUTDb3kyGPOOa5VffokKWSEzN6geJ92dSDY= +github.com/segmentio/kafka-go v0.4.17/go.mod h1:19+Eg7KwrNKy/PFhiIthEPkO8k+ac7/ZYXwYM9Df10w= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tap/extensions/kafka/helpers.go b/tap/extensions/kafka/helpers.go new file mode 100644 index 000000000..d5867c195 --- /dev/null +++ b/tap/extensions/kafka/helpers.go @@ -0,0 +1,648 @@ +package main + +import ( + "encoding/json" + "fmt" + "strconv" +) + +type KafkaPayload struct { + Data interface{} +} + +type KafkaPayloader interface { + MarshalJSON() ([]byte, error) +} + +func (h KafkaPayload) MarshalJSON() ([]byte, error) { + return json.Marshal(h.Data) +} + +type KafkaWrapper struct { + Method string `json:"method"` + Url string `json:"url"` + Details interface{} `json:"details"` +} + +func representRequestHeader(data map[string]interface{}, rep []interface{}) []interface{} { + requestHeader, _ := 
json.Marshal([]map[string]string{ + { + "name": "ApiKey", + "value": apiNames[int(data["ApiKey"].(float64))], + }, + { + "name": "ApiVersion", + "value": fmt.Sprintf("%d", int(data["ApiVersion"].(float64))), + }, + { + "name": "Client ID", + "value": data["ClientID"].(string), + }, + { + "name": "Correlation ID", + "value": fmt.Sprintf("%d", int(data["CorrelationID"].(float64))), + }, + { + "name": "Size", + "value": fmt.Sprintf("%d", int(data["Size"].(float64))), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Request Header", + "data": string(requestHeader), + }) + + return rep +} + +func representResponseHeader(data map[string]interface{}, rep []interface{}) []interface{} { + requestHeader, _ := json.Marshal([]map[string]string{ + { + "name": "Correlation ID", + "value": fmt.Sprintf("%d", int(data["CorrelationID"].(float64))), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Response Header", + "data": string(requestHeader), + }) + + return rep +} + +func representMetadataRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics := "" + allowAutoTopicCreation := "" + includeClusterAuthorizedOperations := "" + includeTopicAuthorizedOperations := "" + if payload["Topics"] != nil { + x, _ := json.Marshal(payload["Topics"].([]interface{})) + topics = string(x) + } + if payload["AllowAutoTopicCreation"] != nil { + allowAutoTopicCreation = strconv.FormatBool(payload["AllowAutoTopicCreation"].(bool)) + } + if payload["IncludeClusterAuthorizedOperations"] != nil { + includeClusterAuthorizedOperations = strconv.FormatBool(payload["IncludeClusterAuthorizedOperations"].(bool)) + } + if payload["IncludeTopicAuthorizedOperations"] != nil { + includeTopicAuthorizedOperations = strconv.FormatBool(payload["IncludeTopicAuthorizedOperations"].(bool)) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Topics", + "value": topics, + }, + { + "name": "Allow Auto Topic Creation", + "value": allowAutoTopicCreation, + }, + { + "name": "Include Cluster Authorized Operations", + "value": includeClusterAuthorizedOperations, + }, + { + "name": "Include Topic Authorized Operations", + "value": includeTopicAuthorizedOperations, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representMetadataResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + brokers, _ := json.Marshal(payload["Brokers"].([]interface{})) + controllerID := "" + clusterID := "" + throttleTimeMs := "" + clusterAuthorizedOperations := "" + if payload["ControllerID"] != nil { + controllerID = fmt.Sprintf("%d", int(payload["ControllerID"].(float64))) + } + if payload["ClusterID"] != nil { + clusterID = payload["ClusterID"].(string) + } + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + if payload["ClusterAuthorizedOperations"] != nil { + clusterAuthorizedOperations = fmt.Sprintf("%d", int(payload["ClusterAuthorizedOperations"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + { + 
"name": "Brokers", + "value": string(brokers), + }, + { + "name": "Cluster ID", + "value": clusterID, + }, + { + "name": "Controller ID", + "value": controllerID, + }, + { + "name": "Topics", + "value": string(topics), + }, + { + "name": "Cluster Authorized Operations", + "value": clusterAuthorizedOperations, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representApiVersionsRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + clientSoftwareName := "" + clientSoftwareVersion := "" + if payload["ClientSoftwareName"] != nil { + clientSoftwareName = payload["ClientSoftwareName"].(string) + } + if payload["ClientSoftwareVersion"] != nil { + clientSoftwareVersion = payload["ClientSoftwareVersion"].(string) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Client Software Name", + "value": clientSoftwareName, + }, + { + "name": "Client Software Version", + "value": clientSoftwareVersion, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representApiVersionsResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + apiKeys := "" + if payload["TopicNames"] != nil { + x, _ := json.Marshal(payload["ApiKeys"].([]interface{})) + apiKeys = string(x) + } + throttleTimeMs := "" + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Error Code", + "value": fmt.Sprintf("%d", int(payload["ErrorCode"].(float64))), + }, + { + "name": "ApiKeys", + "value": apiKeys, + }, + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representProduceRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topicData := "" + _topicData := payload["TopicData"] + if _topicData != nil { + x, _ := json.Marshal(_topicData.([]interface{})) + topicData = string(x) + } + transactionalID := "" + if payload["TransactionalID"] != nil { + transactionalID = payload["TransactionalID"].(string) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Transactional ID", + "value": transactionalID, + }, + { + "name": "Required Acknowledgements", + "value": fmt.Sprintf("%d", int(payload["RequiredAcks"].(float64))), + }, + { + "name": "Timeout", + "value": fmt.Sprintf("%d", int(payload["Timeout"].(float64))), + }, + { + "name": "Topic Data", + "value": topicData, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representProduceResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + responses, _ := json.Marshal(payload["Responses"].([]interface{})) + throttleTimeMs := "" + if 
payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Responses", + "value": string(responses), + }, + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representFetchRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + replicaId := "" + if payload["ReplicaId"] != nil { + replicaId = fmt.Sprintf("%d", int(payload["ReplicaId"].(float64))) + } + maxBytes := "" + if payload["MaxBytes"] != nil { + maxBytes = fmt.Sprintf("%d", int(payload["MaxBytes"].(float64))) + } + isolationLevel := "" + if payload["IsolationLevel"] != nil { + isolationLevel = fmt.Sprintf("%d", int(payload["IsolationLevel"].(float64))) + } + sessionId := "" + if payload["SessionId"] != nil { + sessionId = fmt.Sprintf("%d", int(payload["SessionId"].(float64))) + } + sessionEpoch := "" + if payload["SessionEpoch"] != nil { + sessionEpoch = fmt.Sprintf("%d", int(payload["SessionEpoch"].(float64))) + } + forgottenTopicsData := "" + if payload["ForgottenTopicsData"] != nil { + x, _ := json.Marshal(payload["ForgottenTopicsData"].(map[string]interface{})) + forgottenTopicsData = string(x) + } + rackId := "" + if payload["RackId"] != nil { + rackId = fmt.Sprintf("%d", int(payload["RackId"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Replica ID", + "value": replicaId, + }, + { + "name": "Maximum Wait (ms)", + "value": fmt.Sprintf("%d", int(payload["MaxWaitMs"].(float64))), + }, + { + "name": "Minimum Bytes", + "value": fmt.Sprintf("%d", int(payload["MinBytes"].(float64))), + }, + { + "name": "Maximum Bytes", + "value": maxBytes, + }, + { + "name": "Isolation Level", + "value": isolationLevel, + }, + { + "name": "Session ID", + "value": sessionId, + }, + { + "name": "Session Epoch", + "value": sessionEpoch, + }, + { + "name": "Topics", + "value": string(topics), + }, + { + "name": "Forgotten Topics Data", + "value": forgottenTopicsData, + }, + { + "name": "Rack ID", + "value": rackId, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representFetchResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + responses, _ := json.Marshal(payload["Responses"].([]interface{})) + throttleTimeMs := "" + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + errorCode := "" + if payload["ErrorCode"] != nil { + errorCode = fmt.Sprintf("%d", int(payload["ErrorCode"].(float64))) + } + sessionId := "" + if payload["SessionId"] != nil { + sessionId = fmt.Sprintf("%d", int(payload["SessionId"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + { + "name": "Error Code", + "value": errorCode, + }, + { + "name": "Session ID", + "value": sessionId, + }, + { + "name": "Responses", + "value": string(responses), + }, + }) + rep = append(rep, 
map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representListOffsetsRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Replica ID", + "value": fmt.Sprintf("%d", int(payload["ReplicaId"].(float64))), + }, + { + "name": "Topics", + "value": string(topics), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representListOffsetsResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + throttleTimeMs := "" + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + { + "name": "Topics", + "value": string(topics), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representCreateTopicsRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + validateOnly := "" + if payload["ValidateOnly"] != nil { + validateOnly = strconv.FormatBool(payload["ValidateOnly"].(bool)) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Topics", + "value": string(topics), + }, + { + "name": "Timeout (ms)", + "value": fmt.Sprintf("%d", int(payload["TimeoutMs"].(float64))), + }, + { + "name": "Validate Only", + "value": validateOnly, + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representCreateTopicsResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics, _ := json.Marshal(payload["Topics"].([]interface{})) + throttleTimeMs := "" + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + { + "name": "Topics", + "value": string(topics), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representDeleteTopicsRequest(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representRequestHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + topics := "" + if payload["Topics"] != nil { + x, _ := json.Marshal(payload["Topics"].([]interface{})) + topics = string(x) + } + topicNames := "" + if payload["TopicNames"] != nil { + x, _ := json.Marshal(payload["TopicNames"].([]interface{})) + topicNames = string(x) + } + repPayload, _ := 
json.Marshal([]map[string]string{ + { + "name": "TopicNames", + "value": string(topicNames), + }, + { + "name": "Topics", + "value": string(topics), + }, + { + "name": "Timeout (ms)", + "value": fmt.Sprintf("%d", int(payload["TimeoutMs"].(float64))), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} + +func representDeleteTopicsResponse(data map[string]interface{}) []interface{} { + rep := make([]interface{}, 0) + + rep = representResponseHeader(data, rep) + + payload := data["Payload"].(map[string]interface{}) + responses, _ := json.Marshal(payload["Responses"].([]interface{})) + throttleTimeMs := "" + if payload["ThrottleTimeMs"] != nil { + throttleTimeMs = fmt.Sprintf("%d", int(payload["ThrottleTimeMs"].(float64))) + } + repPayload, _ := json.Marshal([]map[string]string{ + { + "name": "Throttle Time (ms)", + "value": throttleTimeMs, + }, + { + "name": "Responses", + "value": string(responses), + }, + }) + rep = append(rep, map[string]string{ + "type": "table", + "title": "Payload", + "data": string(repPayload), + }) + + return rep +} diff --git a/tap/extensions/kafka/main.go b/tap/extensions/kafka/main.go new file mode 100644 index 000000000..46b9b710b --- /dev/null +++ b/tap/extensions/kafka/main.go @@ -0,0 +1,231 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "log" + + "github.com/up9inc/mizu/tap/api" +) + +var _protocol api.Protocol = api.Protocol{ + Name: "kafka", + LongName: "Apache Kafka Protocol", + Abbreviation: "KAFKA", + Version: "12", + BackgroundColor: "#000000", + ForegroundColor: "#ffffff", + FontSize: 11, + ReferenceLink: "https://kafka.apache.org/protocol", + Ports: []string{"9092"}, + Priority: 2, +} + +func init() { + log.Println("Initializing Kafka extension...") +} + +type dissecting string + +func (d dissecting) Register(extension *api.Extension) { + extension.Protocol = _protocol + extension.MatcherMap = reqResMatcher.openMessagesMap +} + +func (d dissecting) Ping() { + log.Printf("pong %s\n", _protocol.Name) +} + +func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, emitter api.Emitter) error { + for { + if isClient { + _, _, err := ReadRequest(b, tcpID) + if err != nil { + return err + } + } else { + err := ReadResponse(b, tcpID, emitter) + if err != nil { + return err + } + } + } +} + +func (d dissecting) Analyze(item *api.OutputChannelItem, entryId string, resolvedSource string, resolvedDestination string) *api.MizuEntry { + request := item.Pair.Request.Payload.(map[string]interface{}) + reqDetails := request["details"].(map[string]interface{}) + service := "kafka" + if resolvedDestination != "" { + service = resolvedDestination + } else if resolvedSource != "" { + service = resolvedSource + } + apiKey := ApiKey(reqDetails["ApiKey"].(float64)) + + summary := "" + switch apiKey { + case Metadata: + _topics := reqDetails["Payload"].(map[string]interface{})["Topics"] + if _topics == nil { + break + } + topics := _topics.([]interface{}) + for _, topic := range topics { + summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["Name"].(string)) + } + if len(summary) > 0 { + summary = summary[:len(summary)-2] + } + break + case ApiVersions: + summary = reqDetails["ClientID"].(string) + break + case Produce: + _topics := reqDetails["Payload"].(map[string]interface{})["TopicData"] + if _topics == nil { + break + } + topics := _topics.([]interface{}) + for _, topic := range topics { + summary += 
fmt.Sprintf("%s, ", topic.(map[string]interface{})["Topic"].(string)) + } + if len(summary) > 0 { + summary = summary[:len(summary)-2] + } + break + case Fetch: + topics := reqDetails["Payload"].(map[string]interface{})["Topics"].([]interface{}) + for _, topic := range topics { + summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["Topic"].(string)) + } + if len(summary) > 0 { + summary = summary[:len(summary)-2] + } + break + case ListOffsets: + topics := reqDetails["Payload"].(map[string]interface{})["Topics"].([]interface{}) + for _, topic := range topics { + summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["Name"].(string)) + } + if len(summary) > 0 { + summary = summary[:len(summary)-2] + } + break + case CreateTopics: + topics := reqDetails["Payload"].(map[string]interface{})["Topics"].([]interface{}) + for _, topic := range topics { + summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["Name"].(string)) + } + if len(summary) > 0 { + summary = summary[:len(summary)-2] + } + break + case DeleteTopics: + topicNames := reqDetails["TopicNames"].([]string) + for _, name := range topicNames { + summary += fmt.Sprintf("%s, ", name) + } + break + } + + request["url"] = summary + entryBytes, _ := json.Marshal(item.Pair) + return &api.MizuEntry{ + ProtocolName: _protocol.Name, + ProtocolVersion: _protocol.Version, + EntryId: entryId, + Entry: string(entryBytes), + Url: fmt.Sprintf("%s%s", service, summary), + Method: apiNames[apiKey], + Status: 0, + RequestSenderIp: item.ConnectionInfo.ClientIP, + Service: service, + Timestamp: item.Timestamp, + Path: summary, + ResolvedSource: resolvedSource, + ResolvedDestination: resolvedDestination, + SourceIp: item.ConnectionInfo.ClientIP, + DestinationIp: item.ConnectionInfo.ServerIP, + SourcePort: item.ConnectionInfo.ClientPort, + DestinationPort: item.ConnectionInfo.ServerPort, + IsOutgoing: item.ConnectionInfo.IsOutgoing, + } +} + +func (d dissecting) Summarize(entry *api.MizuEntry) *api.BaseEntryDetails { + return &api.BaseEntryDetails{ + Id: entry.EntryId, + Protocol: _protocol, + Url: entry.Url, + RequestSenderIp: entry.RequestSenderIp, + Service: entry.Service, + Summary: entry.Path, + StatusCode: entry.Status, + Method: entry.Method, + Timestamp: entry.Timestamp, + SourceIp: entry.SourceIp, + DestinationIp: entry.DestinationIp, + SourcePort: entry.SourcePort, + DestinationPort: entry.DestinationPort, + IsOutgoing: entry.IsOutgoing, + Latency: 0, + Rules: api.ApplicableRules{ + Latency: 0, + Status: false, + }, + } +} + +func (d dissecting) Represent(entry *api.MizuEntry) (api.Protocol, []byte, error) { + var root map[string]interface{} + json.Unmarshal([]byte(entry.Entry), &root) + representation := make(map[string]interface{}, 0) + request := root["request"].(map[string]interface{})["payload"].(map[string]interface{}) + response := root["response"].(map[string]interface{})["payload"].(map[string]interface{}) + reqDetails := request["details"].(map[string]interface{}) + resDetails := response["details"].(map[string]interface{}) + + apiKey := ApiKey(reqDetails["ApiKey"].(float64)) + + var repRequest []interface{} + var repResponse []interface{} + switch apiKey { + case Metadata: + repRequest = representMetadataRequest(reqDetails) + repResponse = representMetadataResponse(resDetails) + break + case ApiVersions: + repRequest = representApiVersionsRequest(reqDetails) + repResponse = representApiVersionsResponse(resDetails) + break + case Produce: + repRequest = representProduceRequest(reqDetails) + repResponse = 
representProduceResponse(resDetails) + break + case Fetch: + repRequest = representFetchRequest(reqDetails) + repResponse = representFetchResponse(resDetails) + break + case ListOffsets: + repRequest = representListOffsetsRequest(reqDetails) + repResponse = representListOffsetsResponse(resDetails) + break + case CreateTopics: + repRequest = representCreateTopicsRequest(reqDetails) + repResponse = representCreateTopicsResponse(resDetails) + break + case DeleteTopics: + repRequest = representDeleteTopicsRequest(reqDetails) + repResponse = representDeleteTopicsResponse(resDetails) + break + } + + representation["request"] = repRequest + representation["response"] = repResponse + object, err := json.Marshal(representation) + return _protocol, object, err +} + +var Dissector dissecting diff --git a/tap/extensions/kafka/matcher.go b/tap/extensions/kafka/matcher.go new file mode 100644 index 000000000..4b150f28b --- /dev/null +++ b/tap/extensions/kafka/matcher.go @@ -0,0 +1,58 @@ +package main + +import ( + "sync" + "time" +) + +var reqResMatcher = CreateResponseRequestMatcher() // global +const maxTry int = 3000 + +type RequestResponsePair struct { + Request Request + Response Response +} + +// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}::{correlation_id} +type requestResponseMatcher struct { + openMessagesMap *sync.Map +} + +func CreateResponseRequestMatcher() requestResponseMatcher { + newMatcher := &requestResponseMatcher{openMessagesMap: &sync.Map{}} + return *newMatcher +} + +func (matcher *requestResponseMatcher) registerRequest(key string, request *Request) *RequestResponsePair { + if response, found := matcher.openMessagesMap.LoadAndDelete(key); found { + // Check for a situation that only occurs when a Kafka broker is initiating + switch response.(type) { + case *Response: + return matcher.preparePair(request, response.(*Response)) + } + } + + matcher.openMessagesMap.Store(key, request) + return nil +} + +func (matcher *requestResponseMatcher) registerResponse(key string, response *Response) *RequestResponsePair { + try := 0 + for { + try++ + if try > maxTry { + return nil + } + if request, found := matcher.openMessagesMap.LoadAndDelete(key); found { + return matcher.preparePair(request.(*Request), response) + } + time.Sleep(1 * time.Millisecond) + } +} + +func (matcher *requestResponseMatcher) preparePair(request *Request, response *Response) *RequestResponsePair { + return &RequestResponsePair{ + Request: *request, + Response: *response, + } +} diff --git a/tap/extensions/kafka/protocol.go b/tap/extensions/kafka/protocol.go new file mode 100644 index 000000000..ec8af3298 --- /dev/null +++ b/tap/extensions/kafka/protocol.go @@ -0,0 +1,480 @@ +package main + +import ( + "fmt" + "io" + "net" + "reflect" + "strconv" + "strings" +) + +// Message is an interface implemented by all request and response types of the +// kafka protocol. +// +// This interface is used mostly as a safeguard to provide a compile-time check +// for values passed to functions dealing with kafka message types.
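+// +// For example (an illustrative sketch, not code from this package), a caller +// can branch on the API key of any message without knowing its concrete type: +// +// func isDataPlane(m Message) bool { +// switch m.ApiKey() { +// case Produce, Fetch: +// return true +// default: +// return false +// } +// }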
+type Message interface { + ApiKey() ApiKey +} + +type ApiKey int16 + +func (k ApiKey) String() string { + if i := int(k); i >= 0 && i < len(apiNames) { + return apiNames[i] + } + return strconv.Itoa(int(k)) +} + +func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() } + +func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() } + +func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 { + min := k.MinVersion() + max := k.MaxVersion() + switch { + case min > maxVersion: + return min + case max < maxVersion: + return max + default: + return maxVersion + } +} + +func (k ApiKey) apiType() apiType { + if i := int(k); i >= 0 && i < len(apiTypes) { + return apiTypes[i] + } + return apiType{} +} + +const ( + Produce ApiKey = 0 + Fetch ApiKey = 1 + ListOffsets ApiKey = 2 + Metadata ApiKey = 3 + LeaderAndIsr ApiKey = 4 + StopReplica ApiKey = 5 + UpdateMetadata ApiKey = 6 + ControlledShutdown ApiKey = 7 + OffsetCommit ApiKey = 8 + OffsetFetch ApiKey = 9 + FindCoordinator ApiKey = 10 + JoinGroup ApiKey = 11 + Heartbeat ApiKey = 12 + LeaveGroup ApiKey = 13 + SyncGroup ApiKey = 14 + DescribeGroups ApiKey = 15 + ListGroups ApiKey = 16 + SaslHandshake ApiKey = 17 + ApiVersions ApiKey = 18 + CreateTopics ApiKey = 19 + DeleteTopics ApiKey = 20 + DeleteRecords ApiKey = 21 + InitProducerId ApiKey = 22 + OffsetForLeaderEpoch ApiKey = 23 + AddPartitionsToTxn ApiKey = 24 + AddOffsetsToTxn ApiKey = 25 + EndTxn ApiKey = 26 + WriteTxnMarkers ApiKey = 27 + TxnOffsetCommit ApiKey = 28 + DescribeAcls ApiKey = 29 + CreateAcls ApiKey = 30 + DeleteAcls ApiKey = 31 + DescribeConfigs ApiKey = 32 + AlterConfigs ApiKey = 33 + AlterReplicaLogDirs ApiKey = 34 + DescribeLogDirs ApiKey = 35 + SaslAuthenticate ApiKey = 36 + CreatePartitions ApiKey = 37 + CreateDelegationToken ApiKey = 38 + RenewDelegationToken ApiKey = 39 + ExpireDelegationToken ApiKey = 40 + DescribeDelegationToken ApiKey = 41 + DeleteGroups ApiKey = 42 + ElectLeaders ApiKey = 43 + IncrementalAlterConfigs ApiKey = 44 + AlterPartitionReassignments ApiKey = 45 + ListPartitionReassignments ApiKey = 46 + OffsetDelete ApiKey = 47 + DescribeClientQuotas ApiKey = 48 + AlterClientQuotas ApiKey = 49 + + numApis = 50 +) + +var apiNames = [numApis]string{ + Produce: "Produce", + Fetch: "Fetch", + ListOffsets: "ListOffsets", + Metadata: "Metadata", + LeaderAndIsr: "LeaderAndIsr", + StopReplica: "StopReplica", + UpdateMetadata: "UpdateMetadata", + ControlledShutdown: "ControlledShutdown", + OffsetCommit: "OffsetCommit", + OffsetFetch: "OffsetFetch", + FindCoordinator: "FindCoordinator", + JoinGroup: "JoinGroup", + Heartbeat: "Heartbeat", + LeaveGroup: "LeaveGroup", + SyncGroup: "SyncGroup", + DescribeGroups: "DescribeGroups", + ListGroups: "ListGroups", + SaslHandshake: "SaslHandshake", + ApiVersions: "ApiVersions", + CreateTopics: "CreateTopics", + DeleteTopics: "DeleteTopics", + DeleteRecords: "DeleteRecords", + InitProducerId: "InitProducerId", + OffsetForLeaderEpoch: "OffsetForLeaderEpoch", + AddPartitionsToTxn: "AddPartitionsToTxn", + AddOffsetsToTxn: "AddOffsetsToTxn", + EndTxn: "EndTxn", + WriteTxnMarkers: "WriteTxnMarkers", + TxnOffsetCommit: "TxnOffsetCommit", + DescribeAcls: "DescribeAcls", + CreateAcls: "CreateAcls", + DeleteAcls: "DeleteAcls", + DescribeConfigs: "DescribeConfigs", + AlterConfigs: "AlterConfigs", + AlterReplicaLogDirs: "AlterReplicaLogDirs", + DescribeLogDirs: "DescribeLogDirs", + SaslAuthenticate: "SaslAuthenticate", + CreatePartitions: "CreatePartitions", + CreateDelegationToken: "CreateDelegationToken", + 
RenewDelegationToken: "RenewDelegationToken", + ExpireDelegationToken: "ExpireDelegationToken", + DescribeDelegationToken: "DescribeDelegationToken", + DeleteGroups: "DeleteGroups", + ElectLeaders: "ElectLeaders", + IncrementalAlterConfigs: "IncrementalAlterConfigs", + AlterPartitionReassignments: "AlterPartitionReassignments", + ListPartitionReassignments: "ListPartitionReassignments", + OffsetDelete: "OffsetDelete", + DescribeClientQuotas: "DescribeClientQuotas", + AlterClientQuotas: "AlterClientQuotas", +} + +type messageType struct { + version int16 + flexible bool + gotype reflect.Type + decode decodeFunc + encode encodeFunc +} + +func (t *messageType) new() Message { + return reflect.New(t.gotype).Interface().(Message) +} + +type apiType struct { + requests []messageType + responses []messageType +} + +func (t apiType) minVersion() int16 { + if len(t.requests) == 0 { + return 0 + } + return t.requests[0].version +} + +func (t apiType) maxVersion() int16 { + if len(t.requests) == 0 { + return 0 + } + return t.requests[len(t.requests)-1].version +} + +var apiTypes [numApis]apiType + +// Register is automatically called when sub-packages are imported to install a +// new pair of request/response message types. +func Register(req, res Message) { + k1 := req.ApiKey() + k2 := res.ApiKey() + + if k1 != k2 { + panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2)) + } + + apiTypes[k1] = apiType{ + requests: typesOf(req), + responses: typesOf(res), + } +} + +func typesOf(v interface{}) []messageType { + return makeTypes(reflect.TypeOf(v).Elem()) +} + +func makeTypes(t reflect.Type) []messageType { + minVersion := int16(-1) + maxVersion := int16(-1) + + // All future versions will be flexible (according to spec), so we don't need to + // worry about maxes here. + minFlexibleVersion := int16(-1) + + forEachStructField(t, func(_ reflect.Type, _ index, tag string) { + forEachStructTag(tag, func(tag structTag) bool { + if minVersion < 0 || tag.MinVersion < minVersion { + minVersion = tag.MinVersion + } + if maxVersion < 0 || tag.MaxVersion > maxVersion { + maxVersion = tag.MaxVersion + } + if tag.TagID > -2 && (minFlexibleVersion < 0 || tag.MinVersion < minFlexibleVersion) { + minFlexibleVersion = tag.MinVersion + } + return true + }) + }) + + types := make([]messageType, 0, (maxVersion-minVersion)+1) + + for v := minVersion; v <= maxVersion; v++ { + flexible := minFlexibleVersion >= 0 && v >= minFlexibleVersion + + types = append(types, messageType{ + version: v, + gotype: t, + flexible: flexible, + decode: decodeFuncOf(t, v, flexible, structTag{}), + encode: encodeFuncOf(t, v, flexible, structTag{}), + }) + } + + return types +} + +type structTag struct { + MinVersion int16 + MaxVersion int16 + Compact bool + Nullable bool + TagID int +} + +func forEachStructTag(tag string, do func(structTag) bool) { + if tag == "-" { + return // special case to ignore the field + } + + forEach(tag, '|', func(s string) bool { + tag := structTag{ + MinVersion: -1, + MaxVersion: -1, + + // Legitimate tag IDs can start at 0. We use -1 as a placeholder to indicate + // that the message type is flexible, so that leaves -2 as the default for + // indicating that there is no tag ID and the message is not flexible.
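+ // For example (illustrative): the struct tag `min=v0,max=v8` parses to + // {MinVersion: 0, MaxVersion: 8, TagID: -2}, while `min=v9,max=v11,tag` + // parses to {MinVersion: 9, MaxVersion: 11, TagID: -1} (flexible, no + // explicit tag ID).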
+ TagID: -2, + } + + var err error + forEach(s, ',', func(s string) bool { + switch { + case strings.HasPrefix(s, "min="): + tag.MinVersion, err = parseVersion(s[4:]) + case strings.HasPrefix(s, "max="): + tag.MaxVersion, err = parseVersion(s[4:]) + case s == "tag": + tag.TagID = -1 + case strings.HasPrefix(s, "tag="): + tag.TagID, err = strconv.Atoi(s[4:]) + case s == "compact": + tag.Compact = true + case s == "nullable": + tag.Nullable = true + default: + err = fmt.Errorf("unrecognized option: %q", s) + } + return err == nil + }) + + if err != nil { + panic(fmt.Errorf("malformed struct tag: %w", err)) + } + + if tag.MinVersion < 0 && tag.MaxVersion >= 0 { + panic(fmt.Errorf("missing minimum version in struct tag: %q", s)) + } + + if tag.MaxVersion < 0 && tag.MinVersion >= 0 { + panic(fmt.Errorf("missing maximum version in struct tag: %q", s)) + } + + if tag.MinVersion > tag.MaxVersion { + panic(fmt.Errorf("invalid version range in struct tag: %q", s)) + } + + return do(tag) + }) +} + +func forEach(s string, sep byte, do func(string) bool) bool { + for len(s) != 0 { + p := "" + i := strings.IndexByte(s, sep) + if i < 0 { + p, s = s, "" + } else { + p, s = s[:i], s[i+1:] + } + if !do(p) { + return false + } + } + return true +} + +func forEachStructField(t reflect.Type, do func(reflect.Type, index, string)) { + for i, n := 0, t.NumField(); i < n; i++ { + f := t.Field(i) + + if f.PkgPath != "" && f.Name != "_" { + continue + } + + kafkaTag, ok := f.Tag.Lookup("kafka") + if !ok { + kafkaTag = "|" + } + + do(f.Type, indexOf(f), kafkaTag) + } +} + +func parseVersion(s string) (int16, error) { + if !strings.HasPrefix(s, "v") { + return 0, fmt.Errorf("invalid version number: %q", s) + } + i, err := strconv.ParseInt(s[1:], 10, 16) + if err != nil { + return 0, fmt.Errorf("invalid version number: %q: %w", s, err) + } + if i < 0 { + return 0, fmt.Errorf("invalid negative version number: %q", s) + } + return int16(i), nil +} + +func dontExpectEOF(err error) error { + switch err { + case nil: + return nil + case io.EOF: + return io.ErrUnexpectedEOF + default: + return err + } +} + +type Broker struct { + ID int32 + Host string + Port int32 + Rack string +} + +func (b Broker) String() string { + return net.JoinHostPort(b.Host, itoa(b.Port)) +} + +func (b Broker) Format(w fmt.State, v rune) { + switch v { + case 'd': + io.WriteString(w, itoa(b.ID)) + case 's': + io.WriteString(w, b.String()) + case 'v': + io.WriteString(w, itoa(b.ID)) + io.WriteString(w, " ") + io.WriteString(w, b.String()) + if b.Rack != "" { + io.WriteString(w, " ") + io.WriteString(w, b.Rack) + } + } +} + +func itoa(i int32) string { + return strconv.Itoa(int(i)) +} + +type Topic struct { + Name string + Error int16 + Partitions map[int32]Partition +} + +type Partition struct { + ID int32 + Error int16 + Leader int32 + Replicas []int32 + ISR []int32 + Offline []int32 +} + +// BrokerMessage is an extension of the Message interface implemented by some +// request types to customize the broker assignment logic. +type BrokerMessage interface { + // Given a representation of the kafka cluster state as argument, returns + // the broker that the message should be routed to. + Broker(Cluster) (Broker, error) +} + +// GroupMessage is an extension of the Message interface implemented by some +// request types to inform the program that they should be routed to a group +// coordinator. +type GroupMessage interface { + // Returns the group configured on the message. 
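+ // (For example, a hypothetical JoinGroup request type would return its + // group ID here so the message can be routed to the right coordinator.)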
+ Group() string +} + +// PreparedMessage is an extension of the Message interface implemented by some +// request types which may need to run some pre-processing on their state before +// being sent. +type PreparedMessage interface { + // Prepares the message before being sent to a kafka broker using the API + // version passed as argument. + Prepare(apiVersion int16) +} + +// Splitter is an interface implemented by messages that can be split into +// multiple requests and have their results merged back by a Merger. +type Splitter interface { + // For a given cluster layout, returns the list of messages constructed + // from the receiver for each request that should be sent to the cluster. + // The second return value is a Merger which can be used to merge back the + // results of each request into a single message (or an error). + Split(Cluster) ([]Message, Merger, error) +} + +// Merger is an interface implemented by messages which can merge multiple +// results into one response. +type Merger interface { + // Given a list of messages and associated results, merge them back into a + // response (or an error). The results must be either Message or error + // values; other types should trigger a panic. + Merge(messages []Message, results []interface{}) (Message, error) +} + +// Result converts r to a Message or an error, or panics if r could not be +// converted to either of these types. +func Result(r interface{}) (Message, error) { + switch v := r.(type) { + case Message: + return v, nil + case error: + return nil, v + default: + panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v)) + } +} diff --git a/tap/extensions/kafka/protocol_make.go b/tap/extensions/kafka/protocol_make.go new file mode 100644 index 000000000..2fc71a254 --- /dev/null +++ b/tap/extensions/kafka/protocol_make.go @@ -0,0 +1,219 @@ +package main + +import ( + "encoding/binary" + "fmt" + "strconv" +) + +type ApiVersion struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (v ApiVersion) Format(w fmt.State, r rune) { + switch r { + case 's': + fmt.Fprint(w, apiKey(v.ApiKey)) + case 'd': + switch { + case w.Flag('-'): + fmt.Fprint(w, v.MinVersion) + case w.Flag('+'): + fmt.Fprint(w, v.MaxVersion) + default: + fmt.Fprint(w, v.ApiKey) + } + case 'v': + switch { + case w.Flag('-'): + fmt.Fprintf(w, "v%d", v.MinVersion) + case w.Flag('+'): + fmt.Fprintf(w, "v%d", v.MaxVersion) + case w.Flag('#'): + fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion) + default: + fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion) + } + } +} + +type apiKey int16 + +const ( + produce apiKey = 0 + fetch apiKey = 1 + listOffsets apiKey = 2 + metadata apiKey = 3 + leaderAndIsr apiKey = 4 + stopReplica apiKey = 5 + updateMetadata apiKey = 6 + controlledShutdown apiKey = 7 + offsetCommit apiKey = 8 + offsetFetch apiKey = 9 + findCoordinator apiKey = 10 + joinGroup apiKey = 11 + heartbeat apiKey = 12 + leaveGroup apiKey = 13 + syncGroup apiKey = 14 + describeGroups apiKey = 15 + listGroups apiKey = 16 + saslHandshake apiKey = 17 + apiVersions apiKey = 18 + createTopics apiKey = 19 + deleteTopics apiKey = 20 + deleteRecords apiKey = 21 + initProducerId apiKey = 22 + offsetForLeaderEpoch apiKey = 23 + addPartitionsToTxn apiKey = 24 + addOffsetsToTxn apiKey = 25 + endTxn apiKey = 26 + writeTxnMarkers apiKey = 27 + txnOffsetCommit apiKey = 28 + describeAcls apiKey = 29 + createAcls apiKey = 30 + deleteAcls apiKey = 31 + describeConfigs
apiKey = 32 + alterConfigs apiKey = 33 + alterReplicaLogDirs apiKey = 34 + describeLogDirs apiKey = 35 + saslAuthenticate apiKey = 36 + createPartitions apiKey = 37 + createDelegationToken apiKey = 38 + renewDelegationToken apiKey = 39 + expireDelegationToken apiKey = 40 + describeDelegationToken apiKey = 41 + deleteGroups apiKey = 42 + electLeaders apiKey = 43 + incrementalAlterConfigs apiKey = 44 + alterPartitionReassignments apiKey = 45 + listPartitionReassignments apiKey = 46 + offsetDelete apiKey = 47 +) + +func (k apiKey) String() string { + if i := int(k); i >= 0 && i < len(apiKeyStrings) { + return apiKeyStrings[i] + } + return strconv.Itoa(int(k)) +} + +type apiVersion int16 + +const ( + v0 = 0 + v1 = 1 + v2 = 2 + v3 = 3 + v4 = 4 + v5 = 5 + v6 = 6 + v7 = 7 + v8 = 8 + v9 = 9 + v10 = 10 +) + +var apiKeyStrings = [...]string{ + produce: "Produce", + fetch: "Fetch", + listOffsets: "ListOffsets", + metadata: "Metadata", + leaderAndIsr: "LeaderAndIsr", + stopReplica: "StopReplica", + updateMetadata: "UpdateMetadata", + controlledShutdown: "ControlledShutdown", + offsetCommit: "OffsetCommit", + offsetFetch: "OffsetFetch", + findCoordinator: "FindCoordinator", + joinGroup: "JoinGroup", + heartbeat: "Heartbeat", + leaveGroup: "LeaveGroup", + syncGroup: "SyncGroup", + describeGroups: "DescribeGroups", + listGroups: "ListGroups", + saslHandshake: "SaslHandshake", + apiVersions: "ApiVersions", + createTopics: "CreateTopics", + deleteTopics: "DeleteTopics", + deleteRecords: "DeleteRecords", + initProducerId: "InitProducerId", + offsetForLeaderEpoch: "OffsetForLeaderEpoch", + addPartitionsToTxn: "AddPartitionsToTxn", + addOffsetsToTxn: "AddOffsetsToTxn", + endTxn: "EndTxn", + writeTxnMarkers: "WriteTxnMarkers", + txnOffsetCommit: "TxnOffsetCommit", + describeAcls: "DescribeAcls", + createAcls: "CreateAcls", + deleteAcls: "DeleteAcls", + describeConfigs: "DescribeConfigs", + alterConfigs: "AlterConfigs", + alterReplicaLogDirs: "AlterReplicaLogDirs", + describeLogDirs: "DescribeLogDirs", + saslAuthenticate: "SaslAuthenticate", + createPartitions: "CreatePartitions", + createDelegationToken: "CreateDelegationToken", + renewDelegationToken: "RenewDelegationToken", + expireDelegationToken: "ExpireDelegationToken", + describeDelegationToken: "DescribeDelegationToken", + deleteGroups: "DeleteGroups", + electLeaders: "ElectLeaders", + incrementalAlterConfigs: "IncrementalAlterConfigs", + alterPartitionReassignments: "AlterPartitionReassignments", + listPartitionReassignments: "ListPartitionReassignments", + offsetDelete: "OffsetDelete", +} + +type requestHeader struct { + Size int32 + ApiKey int16 + ApiVersion int16 + CorrelationID int32 + ClientID string +} + +func sizeofString(s string) int32 { + return 2 + int32(len(s)) +} + +func (h requestHeader) size() int32 { + return 4 + 2 + 2 + 4 + sizeofString(h.ClientID) +} + +// func (h requestHeader) writeTo(wb *writeBuffer) { +// wb.writeInt32(h.Size) +// wb.writeInt16(h.ApiKey) +// wb.writeInt16(h.ApiVersion) +// wb.writeInt32(h.CorrelationID) +// wb.writeString(h.ClientID) +// } + +type request interface { + size() int32 + // writable +} + +func makeInt8(b []byte) int8 { + return int8(b[0]) +} + +func makeInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(b)) +} + +func makeInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func makeInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func expectZeroSize(sz int, err error) error { + if err == nil && sz != 0 { + err = fmt.Errorf("reading a response
left %d unread bytes", sz) + } + return err +} diff --git a/tap/extensions/kafka/read.go b/tap/extensions/kafka/read.go new file mode 100644 index 000000000..965891c30 --- /dev/null +++ b/tap/extensions/kafka/read.go @@ -0,0 +1,639 @@ +package main + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" +) + +type readable interface { + readFrom(*bufio.Reader, int) (int, error) +} + +var errShortRead = errors.New("not enough bytes available to load the response") + +func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) { + if n > sz { + return sz, errShortRead + } + b, err := r.Peek(n) + if err != nil { + return sz, err + } + f(b) + return discardN(r, sz, n) +} + +func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) }) +} + +func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) { + return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) }) +} + +func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) { + return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) }) +} + +func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) { + return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) }) +} + +func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) { + // Optimistically assume that most of the time, there will be data buffered + // in the reader. If this is not the case, the buffer will be refilled after + // consuming zero bytes from the input. + input, _ := r.Peek(r.Buffered()) + x := uint64(0) + s := uint(0) + + for { + if len(input) > sz { + input = input[:sz] + } + + for i, b := range input { + if b < 0x80 { + x |= uint64(b) << s + *v = int64(x>>1) ^ -(int64(x) & 1) + n, err := r.Discard(i + 1) + return sz - n, err + } + + x |= uint64(b&0x7f) << s + s += 7 + } + + // Make room in the input buffer to load more data from the underlying + // stream. The x and s variables are left untouched, ensuring that the + // varint decoding can continue on the next loop iteration. + n, _ := r.Discard(len(input)) + sz -= n + if sz == 0 { + return 0, errShortRead + } + + // Fill the buffer: ask for one more byte, but in practice the reader + // will load way more from the underlying stream. + if _, err := r.Peek(1); err != nil { + if err == io.EOF { + err = errShortRead + } + return sz, err + } + + // Grab as many bytes as possible from the buffer, then go on to the + // next loop iteration which is going to consume it. 
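+ // (Peeking at no more than r.Buffered() bytes cannot fail, which is why + // the error returned by Peek is ignored here.)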
+ input, _ = r.Peek(r.Buffered()) + } +} + +func readBool(r *bufio.Reader, sz int, v *bool) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 }) +} + +func readString(r *bufio.Reader, sz int, v *string) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewString(r, sz, n) + return + }) +} + +func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var len int16 + + if sz, err = readInt16(r, sz, &len); err != nil { + return sz, err + } + + n := int(len) + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) { + b, sz, err := readNewBytes(r, sz, n) + return string(b), sz, err +} + +func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewBytes(r, sz, n) + return + }) +} + +func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var n int + + if sz, err = readArrayLen(r, sz, &n); err != nil { + return sz, err + } + + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) { + var err error + var b []byte + var shortRead bool + + if n > 0 { + if sz < n { + n = sz + shortRead = true + } + + b = make([]byte, n) + n, err = io.ReadFull(r, b) + b = b[:n] + sz -= n + + if err == nil && shortRead { + err = errShortRead + } + } + + return b, sz, err +} + +func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) { + var err error + var len int32 + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + *n = int(len) + return sz, nil +} + +func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) { + var err error + var len int32 + + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + + for n := int(len); n > 0; n-- { + if sz, err = cb(r, sz); err != nil { + break + } + } + + return sz, err +} + +func readStringArray(r *bufio.Reader, sz int, v *[]string) (remain int, err error) { + var content []string + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value string + if fnRemain, fnErr = readString(r, size, &value); fnErr != nil { + return + } + content = append(content, value) + return + } + if remain, err = readArrayWith(r, sz, fn); err != nil { + return + } + + *v = content + return +} + +func readMapStringInt32(r *bufio.Reader, sz int, v *map[string][]int32) (remain int, err error) { + var len int32 + if remain, err = readInt32(r, sz, &len); err != nil { + return + } + + content := make(map[string][]int32, len) + for i := 0; i < int(len); i++ { + var key string + var values []int32 + + if remain, err = readString(r, remain, &key); err != nil { + return + } + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value int32 + if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { + return + } + values = append(values, value) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + content[key] = values + } + *v = content + + return +} + +func read(r *bufio.Reader, sz int, a interface{}) (int, error) { + switch v := a.(type) { + case *int8: + return readInt8(r, sz, v) + case *int16: + 
return readInt16(r, sz, v) + case *int32: + return readInt32(r, sz, v) + case *int64: + return readInt64(r, sz, v) + case *bool: + return readBool(r, sz, v) + case *string: + return readString(r, sz, v) + case *[]byte: + return readBytes(r, sz, v) + } + switch v := reflect.ValueOf(a).Elem(); v.Kind() { + case reflect.Struct: + return readStruct(r, sz, v) + case reflect.Slice: + return readSlice(r, sz, v) + default: + panic(fmt.Sprintf("unsupported type: %T", a)) + } +} + +func ReadAll(r *bufio.Reader, sz int, ptrs ...interface{}) (int, error) { + var err error + + for _, ptr := range ptrs { + if sz, err = readPtr(r, sz, ptr); err != nil { + break + } + } + + return sz, err +} + +func readPtr(r *bufio.Reader, sz int, ptr interface{}) (int, error) { + switch v := ptr.(type) { + case *int8: + return readInt8(r, sz, v) + case *int16: + return readInt16(r, sz, v) + case *int32: + return readInt32(r, sz, v) + case *int64: + return readInt64(r, sz, v) + case *string: + return readString(r, sz, v) + case *[]byte: + return readBytes(r, sz, v) + case readable: + return v.readFrom(r, sz) + default: + panic(fmt.Sprintf("unsupported type: %T", v)) + } +} + +func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) { + var err error + for i, n := 0, v.NumField(); i != n; i++ { + if sz, err = read(r, sz, v.Field(i).Addr().Interface()); err != nil { + return sz, err + } + } + return sz, nil +} + +func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) { + var err error + var len int32 + + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + + if n := int(len); n < 0 { + v.Set(reflect.Zero(v.Type())) + } else { + v.Set(reflect.MakeSlice(v.Type(), n, n)) + + for i := 0; i != n; i++ { + if sz, err = read(r, sz, v.Index(i).Addr().Interface()); err != nil { + return sz, err + } + } + } + + return sz, nil +} + +func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + MessageSetSize int32 + } + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requested messages for a single + // topic; unless there's a bug in the kafka server, we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server.
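+ // After the fixed-size header fields are consumed, the only bytes left in + // this frame should be the message set itself, hence the equality check below.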
+ if remain != int(p.MessageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", p.MessageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return +} + +func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requested messages for a single + // topic; unless there's a bug in the kafka server, we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server.
+ if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} + +func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + var errorCode int16 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt16(r, remain, &errorCode); err != nil { + return + } + if errorCode != 0 { + err = Error(errorCode) + return + } + + if remain, err = discardInt32(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requested messages for a single + // topic; unless there's a bug in the kafka server, we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} + +func readMessageHeader(r *bufio.Reader, sz int) (offset int64, attributes int8, timestamp int64, remain int, err error) { + var version int8 + + if remain, err = readInt64(r, sz, &offset); err != nil { + return + } + + // On discarding the message size and CRC: + // --------------------------------------- + // + // - Not sure why kafka gives the message size here, we already have the + // number of remaining bytes in the response and kafka should only truncate + // the trailing message.
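+ // (Concretely, the 8 bytes discarded below are the 4-byte message size + // followed by the 4-byte CRC.)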
+ // + // - TCP is already taking care of ensuring data integrity, no need to + // waste resources doing it a second time, so we just skip the message CRC. + // + if remain, err = discardN(r, remain, 8); err != nil { + return + } + + if remain, err = readInt8(r, remain, &version); err != nil { + return + } + + if remain, err = readInt8(r, remain, &attributes); err != nil { + return + } + + switch version { + case 0: + case 1: + remain, err = readInt64(r, remain, &timestamp) + default: + err = fmt.Errorf("unsupported message version %d found in fetch response", version) + } + + return +} diff --git a/tap/extensions/kafka/record.go b/tap/extensions/kafka/record.go new file mode 100644 index 000000000..55a634175 --- /dev/null +++ b/tap/extensions/kafka/record.go @@ -0,0 +1,314 @@ +package main + +import ( + "encoding/binary" + "io" + "time" + + "github.com/segmentio/kafka-go/compress" +) + +// Attributes is a bitset representing special attributes set on records. +type Attributes int16 + +const ( + Gzip Attributes = Attributes(compress.Gzip) // 1 + Snappy Attributes = Attributes(compress.Snappy) // 2 + Lz4 Attributes = Attributes(compress.Lz4) // 3 + Zstd Attributes = Attributes(compress.Zstd) // 4 + Transactional Attributes = 1 << 4 + Control Attributes = 1 << 5 +) + +func (a Attributes) Compression() compress.Compression { + return compress.Compression(a & 7) +} + +func (a Attributes) Transactional() bool { + return (a & Transactional) != 0 +} + +func (a Attributes) Control() bool { + return (a & Control) != 0 +} + +func (a Attributes) String() string { + s := a.Compression().String() + if a.Transactional() { + s += "+transactional" + } + if a.Control() { + s += "+control" + } + return s +} + +// Header represents a single entry in a list of record headers. +type Header struct { + Key string + Value []byte +} + +// Record represents a single kafka record. +// +// Record values are not safe to use concurrently from multiple goroutines. +type Record struct { + // The offset at which the record exists in a topic partition. This value + // is ignored in produce requests. + Offset int64 + + // Returns the time of the record. This value may be omitted in produce + // requests to let kafka set the time when it saves the record. + Time time.Time + + // Returns a byte sequence containing the key of this record. The returned + // sequence may be nil to indicate that the record has no key. If the record + // is part of a RecordSet, the content of the key must remain valid at least + // until the record set is closed (or until the key is closed). + Key Bytes + + // Returns a byte sequence containing the value of this record. The returned + // sequence may be nil to indicate that the record has no value. If the + // record is part of a RecordSet, the content of the value must remain valid + // at least until the record set is closed (or until the value is closed). + Value Bytes + + // Returns the list of headers associated with this record. The returned + // slice may be reused across calls; the program should use it as an + // immutable value. + Headers []Header +} + +// RecordSet represents a sequence of records in Produce requests and Fetch +// responses. All v0, v1, and v2 formats are supported. +type RecordSet struct { + // The message version that this record set will be represented as, valid + // values are 1 or 2. + // + // When reading, this is the value of the highest version used in the + // batches that compose the record set.
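+ // (Versions 0 and 1 correspond to the legacy Kafka message formats; + // version 2 is the record batch format introduced in Kafka 0.11.)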
+ // + // When writing, this value dictates the format that the records will be + // encoded in. + Version int8 + + // Attributes set on the record set. + // + // When reading, the attributes are the combination of all attributes in + // the batches that compose the record set. + // + // When writing, the attributes apply to the whole sequence of records in + // the set. + Attributes Attributes + + // A reader exposing the sequence of records. + // + // When reading a RecordSet from an io.Reader, the Records field will be a + // *RecordStream. If the program needs to access the details of each batch + // that compose the stream, it may use type assertions to access the + // underlying types of each batch. + Records RecordReader +} + +// bufferedReader is an interface implemented by types like bufio.Reader, which +// we use to optimize prefix reads by accessing the internal buffer directly +// through calls to Peek. +type bufferedReader interface { + Discard(int) (int, error) + Peek(int) ([]byte, error) +} + +// bytesBuffer is an interface implemented by types like bytes.Buffer, which we +// use to optimize prefix reads by accessing the internal buffer directly +// through calls to Bytes. +type bytesBuffer interface { + Bytes() []byte +} + +// magicByteOffset is the position of the magic byte in all versions of record +// sets in the kafka protocol. +const magicByteOffset = 16 + +// ReadFrom reads the representation of a record set from r into rs, returning +// the number of bytes consumed from r, and a non-nil error if the record set +// could not be read. +func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) { + // d, _ := r.(*decoder) + // if d == nil { + // d = &decoder{ + // reader: r, + // remain: 4, + // } + // } + + // *rs = RecordSet{} + // limit := d.remain + // size := d.readInt32() + + // if d.err != nil { + // return int64(limit - d.remain), d.err + // } + + // if size <= 0 { + // return 4, nil + // } + + // stream := &RecordStream{ + // Records: make([]RecordReader, 0, 4), + // } + + // var err error + // d.remain = int(size) + + // for d.remain > 0 && err == nil { + // var version byte + + // if d.remain < (magicByteOffset + 1) { + // if len(stream.Records) != 0 { + // break + // } + // return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1) + // } + + // switch r := d.reader.(type) { + // case bufferedReader: + // b, err := r.Peek(magicByteOffset + 1) + // if err != nil { + // n, _ := r.Discard(len(b)) + // return 4 + int64(n), dontExpectEOF(err) + // } + // version = b[magicByteOffset] + // case bytesBuffer: + // version = r.Bytes()[magicByteOffset] + // default: + // b := make([]byte, magicByteOffset+1) + // if n, err := io.ReadFull(d.reader, b); err != nil { + // return 4 + int64(n), dontExpectEOF(err) + // } + // version = b[magicByteOffset] + // // Reconstruct the prefix that we had to read to determine the version + // // of the record set from the magic byte. + // // + // // Technically this may recursively stack readers when consuming all + // // items of the batch, which could hurt performance. In practice this + // // path should not be taken, though, since the decoder would read from a + // // *bufio.Reader which implements the bufferedReader interface.
+ // d.reader = io.MultiReader(bytes.NewReader(b), d.reader) + // } + + // var tmp RecordSet + // switch version { + // case 0, 1: + // err = tmp.readFromVersion1(d) + // case 2: + // err = tmp.readFromVersion2(d) + // default: + // err = fmt.Errorf("unsupported message version %d for message of size %d", version, size) + // } + + // if tmp.Version > rs.Version { + // rs.Version = tmp.Version + // } + + // rs.Attributes |= tmp.Attributes + + // if tmp.Records != nil { + // stream.Records = append(stream.Records, tmp.Records) + // } + // } + + // if len(stream.Records) != 0 { + // rs.Records = stream + // // Ignore errors if we've successfully read records, so the + // // program can keep making progress. + // err = nil + // } + + // d.discardAll() + // rn := 4 + (int(size) - d.remain) + // d.remain = limit - rn + // return int64(rn), err + return 0, nil +} + +// WriteTo writes the representation of rs into w. The value of rs.Version +// dictates which format that the record set will be represented as. +// +// The error will be ErrNoRecord if rs contained no records. +// +// Note: since this package is only compatible with kafka 0.10 and above, the +// method never produces messages in version 0. If rs.Version is zero, the +// method defaults to producing messages in version 1. +func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) { + // if rs.Records == nil { + // return 0, ErrNoRecord + // } + + // // This optimization avoids rendering the record set in an intermediary + // // buffer when the writer is already a pageBuffer, which is a common case + // // due to the way WriteRequest and WriteResponse are implemented. + // buffer, _ := w.(*pageBuffer) + // bufferOffset := int64(0) + + // if buffer != nil { + // bufferOffset = buffer.Size() + // } else { + // buffer = newPageBuffer() + // defer buffer.unref() + // } + + // size := packUint32(0) + // buffer.Write(size[:]) // size placeholder + + // var err error + // switch rs.Version { + // case 0, 1: + // err = rs.writeToVersion1(buffer, bufferOffset+4) + // case 2: + // err = rs.writeToVersion2(buffer, bufferOffset+4) + // default: + // err = fmt.Errorf("unsupported record set version %d", rs.Version) + // } + // if err != nil { + // return 0, err + // } + + // n := buffer.Size() - bufferOffset + // if n == 0 { + // size = packUint32(^uint32(0)) + // } else { + // size = packUint32(uint32(n) - 4) + // } + // buffer.WriteAt(size[:], bufferOffset) + + // // This condition indicates that the output writer received by `WriteTo` was + // // not a *pageBuffer, in which case we need to flush the buffered records + // // data into it. + // if buffer != w { + // return buffer.WriteTo(w) + // } + + // return n, nil + return 0, nil +} + +func makeTime(t int64) time.Time { + return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)) +} + +func timestamp(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.UnixNano() / int64(time.Millisecond) +} + +func packUint32(u uint32) (b [4]byte) { + binary.BigEndian.PutUint32(b[:], u) + return +} + +func packUint64(u uint64) (b [8]byte) { + binary.BigEndian.PutUint64(b[:], u) + return +} diff --git a/tap/extensions/kafka/record_bytes.go b/tap/extensions/kafka/record_bytes.go new file mode 100644 index 000000000..cd142f405 --- /dev/null +++ b/tap/extensions/kafka/record_bytes.go @@ -0,0 +1,43 @@ +package main + +import ( + "github.com/segmentio/kafka-go/protocol" +) + +// Header is a key/value pair type representing headers set on records. 
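+// (The type aliases below are intentionally kept commented out; only +// RecordReader is re-exported from the segmentio/kafka-go protocol package +// at this point.)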
+// type Header = protocol.Header + +// Bytes is an interface representing a sequence of bytes. This abstraction +// makes it possible for programs to inject data into produce requests without +// having to load it into an intermediary buffer, or read record keys and values +// from a fetch response directly from internal buffers. +// +// Bytes are not safe to use concurrently from multiple goroutines. +// type Bytes = protocol.Bytes + +// NewBytes constructs a Bytes value from a byte slice. +// +// If b is nil, nil is returned. +// func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) } + +// ReadAll reads b into a byte slice. +// func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) } + +// Record is an interface representing a single kafka record. +// +// Record values are not safe to use concurrently from multiple goroutines. +// type Record = protocol.Record + +// RecordReader is an interface representing a sequence of records. Record sets +// are used in both produce and fetch requests to represent the sequence of +// records that are sent to or received from kafka brokers. +// +// RecordReader values are not safe to use concurrently from multiple goroutines. +type RecordReader = protocol.RecordReader + +// NewRecordReader constructs a RecordReader which exposes the sequence of records +// passed as arguments. +func NewRecordReader(records ...Record) RecordReader { + // return protocol.NewRecordReader(records...) + return nil +} diff --git a/tap/extensions/kafka/reflect.go b/tap/extensions/kafka/reflect.go new file mode 100644 index 000000000..65b59ab13 --- /dev/null +++ b/tap/extensions/kafka/reflect.go @@ -0,0 +1,101 @@ +// +build !unsafe + +package main + +import ( + "reflect" +) + +type index []int + +type _type struct{ typ reflect.Type } + +func typeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x)) +} + +func elemTypeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x).Elem()) +} + +func makeType(t reflect.Type) _type { + return _type{typ: t} +} + +type value struct { + val reflect.Value +} + +func nonAddressableValueOf(x interface{}) value { + return value{val: reflect.ValueOf(x)} +} + +func valueOf(x interface{}) value { + return value{val: reflect.ValueOf(x).Elem()} +} + +func makeValue(t reflect.Type) value { + return value{val: reflect.New(t).Elem()} +} + +func (v value) bool() bool { return v.val.Bool() } + +func (v value) int8() int8 { return int8(v.int64()) } + +func (v value) int16() int16 { return int16(v.int64()) } + +func (v value) int32() int32 { return int32(v.int64()) } + +func (v value) int64() int64 { return v.val.Int() } + +func (v value) string() string { return v.val.String() } + +func (v value) bytes() []byte { return v.val.Bytes() } + +func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() } + +func (v value) array(t reflect.Type) array { return array{val: v.val} } + +func (v value) setBool(b bool) { v.val.SetBool(b) } + +func (v value) setInt8(i int8) { v.setInt64(int64(i)) } + +func (v value) setInt16(i int16) { v.setInt64(int64(i)) } + +func (v value) setInt32(i int32) { v.setInt64(int64(i)) } + +func (v value) setInt64(i int64) { v.val.SetInt(i) } + +func (v value) setString(s string) { v.val.SetString(s) } + +func (v value) setBytes(b []byte) { v.val.SetBytes(b) } + +func (v value) setArray(a array) { + if a.val.IsValid() { + v.val.Set(a.val) + } else { + v.val.Set(reflect.Zero(v.val.Type())) + } +} + +func (v value) fieldByIndex(i index) value { + return value{val: v.val.FieldByIndex(i)}
+} + +type array struct { + val reflect.Value +} + +func makeArray(t reflect.Type, n int) array { + return array{val: reflect.MakeSlice(reflect.SliceOf(t), n, n)} +} + +func (a array) index(i int) value { return value{val: a.val.Index(i)} } + +func (a array) length() int { return a.val.Len() } + +func (a array) isNil() bool { return a.val.IsNil() } + +func indexOf(s reflect.StructField) index { return index(s.Index) } + +func bytesToString(b []byte) string { return string(b) } diff --git a/tap/extensions/kafka/request.go b/tap/extensions/kafka/request.go new file mode 100644 index 000000000..cb4ac4748 --- /dev/null +++ b/tap/extensions/kafka/request.go @@ -0,0 +1,290 @@ +package main + +import ( + "fmt" + "io" + "reflect" + + "github.com/up9inc/mizu/tap/api" +) + +type Request struct { + Size int32 + ApiKey ApiKey + ApiVersion int16 + CorrelationID int32 + ClientID string + Payload interface{} +} + +func ReadRequest(r io.Reader, tcpID *api.TcpID) (apiKey ApiKey, apiVersion int16, err error) { + d := &decoder{reader: r, remain: 4} + size := d.readInt32() + + if size > 1000000 { + return 0, 0, fmt.Errorf("A Kafka message cannot be bigger than 1MB") + } + + if size < 8 { + return 0, 0, fmt.Errorf("A Kafka request header cannot be smaller than 8 bytes") + } + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return 0, 0, err + } + + d.remain = int(size) + apiKey = ApiKey(d.readInt16()) + apiVersion = d.readInt16() + correlationID := d.readInt32() + clientID := d.readString() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + err = fmt.Errorf("unsupported api key: %d", i) + return apiKey, apiVersion, err + } + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return apiKey, apiVersion, err + } + + t := &apiTypes[apiKey] + if t == nil { + err = fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + return apiKey, apiVersion, err + } + + var payload interface{} + + switch apiKey { + case Metadata: + var mt interface{} + var metadataRequest interface{} + if apiVersion >= 11 { + types := makeTypes(reflect.TypeOf(&MetadataRequestV11{}).Elem()) + mt = types[0] + metadataRequest = &MetadataRequestV11{} + } else if apiVersion >= 10 { + types := makeTypes(reflect.TypeOf(&MetadataRequestV10{}).Elem()) + mt = types[0] + metadataRequest = &MetadataRequestV10{} + } else if apiVersion >= 8 { + types := makeTypes(reflect.TypeOf(&MetadataRequestV8{}).Elem()) + mt = types[0] + metadataRequest = &MetadataRequestV8{} + } else if apiVersion >= 4 { + types := makeTypes(reflect.TypeOf(&MetadataRequestV4{}).Elem()) + mt = types[0] + metadataRequest = &MetadataRequestV4{} + } else { + types := makeTypes(reflect.TypeOf(&MetadataRequestV0{}).Elem()) + mt = types[0] + metadataRequest = &MetadataRequestV0{} + } + mt.(messageType).decode(d, valueOf(metadataRequest)) + payload = metadataRequest + break + case ApiVersions: + var mt interface{} + var apiVersionsRequest interface{} + if apiVersion >= 3 { + types := makeTypes(reflect.TypeOf(&ApiVersionsRequestV3{}).Elem()) + mt = types[0] + apiVersionsRequest = &ApiVersionsRequestV3{} + } else { + types := makeTypes(reflect.TypeOf(&ApiVersionsRequestV0{}).Elem()) + mt = types[0] + apiVersionsRequest = &ApiVersionsRequestV0{} + } + mt.(messageType).decode(d, valueOf(apiVersionsRequest)) + payload = apiVersionsRequest + break + case Produce: + var mt interface{} + var produceRequest interface{} + if apiVersion >= 3 { + types := makeTypes(reflect.TypeOf(&ProduceRequestV3{}).Elem()) + mt = types[0] + produceRequest = &ProduceRequestV3{} + } else { + types 
:= makeTypes(reflect.TypeOf(&ProduceRequestV0{}).Elem()) + mt = types[0] + produceRequest = &ProduceRequestV0{} + } + mt.(messageType).decode(d, valueOf(produceRequest)) + payload = produceRequest + break + case Fetch: + var mt interface{} + var fetchRequest interface{} + if apiVersion >= 11 { + types := makeTypes(reflect.TypeOf(&FetchRequestV11{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV11{} + } else if apiVersion >= 9 { + types := makeTypes(reflect.TypeOf(&FetchRequestV9{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV9{} + } else if apiVersion >= 7 { + types := makeTypes(reflect.TypeOf(&FetchRequestV7{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV7{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&FetchRequestV5{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV5{} + } else if apiVersion >= 4 { + types := makeTypes(reflect.TypeOf(&FetchRequestV4{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV4{} + } else if apiVersion >= 3 { + types := makeTypes(reflect.TypeOf(&FetchRequestV3{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV3{} + } else { + types := makeTypes(reflect.TypeOf(&FetchRequestV0{}).Elem()) + mt = types[0] + fetchRequest = &FetchRequestV0{} + } + mt.(messageType).decode(d, valueOf(fetchRequest)) + payload = fetchRequest + break + case ListOffsets: + var mt interface{} + var listOffsetsRequest interface{} + if apiVersion >= 4 { + types := makeTypes(reflect.TypeOf(&ListOffsetsRequestV4{}).Elem()) + mt = types[0] + listOffsetsRequest = &ListOffsetsRequestV4{} + } else if apiVersion >= 2 { + types := makeTypes(reflect.TypeOf(&ListOffsetsRequestV2{}).Elem()) + mt = types[0] + listOffsetsRequest = &ListOffsetsRequestV2{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&ListOffsetsRequestV1{}).Elem()) + mt = types[0] + listOffsetsRequest = &ListOffsetsRequestV1{} + } else { + types := makeTypes(reflect.TypeOf(&ListOffsetsRequestV0{}).Elem()) + mt = types[0] + listOffsetsRequest = &ListOffsetsRequestV0{} + } + mt.(messageType).decode(d, valueOf(listOffsetsRequest)) + payload = listOffsetsRequest + break + case CreateTopics: + var mt interface{} + var createTopicsRequest interface{} + if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&CreateTopicsRequestV1{}).Elem()) + mt = types[0] + createTopicsRequest = &CreateTopicsRequestV1{} + } else { + types := makeTypes(reflect.TypeOf(&CreateTopicsRequestV0{}).Elem()) + mt = types[0] + createTopicsRequest = &CreateTopicsRequestV0{} + } + mt.(messageType).decode(d, valueOf(createTopicsRequest)) + payload = createTopicsRequest + break + case DeleteTopics: + var mt interface{} + var deleteTopicsRequest interface{} + if apiVersion >= 6 { + types := makeTypes(reflect.TypeOf(&DeleteTopicsRequestV6{}).Elem()) + mt = types[0] + deleteTopicsRequest = &DeleteTopicsRequestV6{} + } else { + types := makeTypes(reflect.TypeOf(&DeleteTopicsRequestV0{}).Elem()) + mt = types[0] + deleteTopicsRequest = &DeleteTopicsRequestV0{} + } + mt.(messageType).decode(d, valueOf(deleteTopicsRequest)) + payload = deleteTopicsRequest + default: + return apiKey, 0, fmt.Errorf("(Request) Not implemented: %s", apiKey) + } + + request := &Request{ + Size: size, + ApiKey: apiKey, + ApiVersion: apiVersion, + CorrelationID: correlationID, + ClientID: clientID, + Payload: payload, + } + + key := fmt.Sprintf( + "%s:%s->%s:%s::%d", + tcpID.SrcIP, + tcpID.SrcPort, + tcpID.DstIP, + tcpID.DstPort, + correlationID, + ) + reqResMatcher.registerRequest(key, request) + + 
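// Drain whatever is left of this request frame so the reader is positioned + // at the start of the next Kafka message. +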
d.discardAll() + + return apiKey, apiVersion, nil +} + +func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error { + apiKey := msg.ApiKey() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + return fmt.Errorf("unsupported api key: %d", i) + } + + t := &apiTypes[apiKey] + if t == nil { + return fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + } + + r := &t.requests[apiVersion-minVersion] + v := valueOf(msg) + b := newPageBuffer() + defer b.unref() + + e := &encoder{writer: b} + e.writeInt32(0) // placeholder for the request size + e.writeInt16(int16(apiKey)) + e.writeInt16(apiVersion) + e.writeInt32(correlationID) + + if r.flexible { + // Flexible messages use a nullable string for the client ID, then extra space for a + // tag buffer, which begins with a size value. Since we're not writing any fields into the + // latter, we can just write zero for now. + // + // See + // https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details. + e.writeNullString(clientID) + e.writeUnsignedVarInt(0) + } else { + // Technically, recent versions of Kafka interpret this field as a nullable + // string; however, Kafka 0.10 expected a non-nullable string and fails with + // a NullPointerException when it receives a null client ID. + e.writeString(clientID) + } + r.encode(e, v) + err := e.err + + if err == nil { + size := packUint32(uint32(b.Size()) - 4) + b.WriteAt(size[:], 0) + _, err = b.WriteTo(w) + } + + return err +} diff --git a/tap/extensions/kafka/response.go b/tap/extensions/kafka/response.go new file mode 100644 index 000000000..ac4debf8a --- /dev/null +++ b/tap/extensions/kafka/response.go @@ -0,0 +1,343 @@ +package main + +import ( + "fmt" + "io" + "reflect" + "time" + + "github.com/up9inc/mizu/tap/api" +) + +type Response struct { + Size int32 + CorrelationID int32 + Payload interface{} +} + +func ReadResponse(r io.Reader, tcpID *api.TcpID, emitter api.Emitter) (err error) { + d := &decoder{reader: r, remain: 4} + size := d.readInt32() + + if size > 1000000 { + return fmt.Errorf("A Kafka message cannot be bigger than 1MB") + } + + if size < 4 { + return fmt.Errorf("A Kafka response header cannot be smaller than 4 bytes") + } + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return err + } + + d.remain = int(size) + correlationID := d.readInt32() + var payload interface{} + response := &Response{ + Size: size, + CorrelationID: correlationID, + Payload: payload, + } + + key := fmt.Sprintf( + "%s:%s->%s:%s::%d", + tcpID.DstIP, + tcpID.DstPort, + tcpID.SrcIP, + tcpID.SrcPort, + correlationID, + ) + reqResPair := reqResMatcher.registerResponse(key, response) + if reqResPair == nil { + return fmt.Errorf("Couldn't match a Kafka response to a Kafka request in 3 seconds!") + } + apiKey := reqResPair.Request.ApiKey + apiVersion := reqResPair.Request.ApiVersion + + switch apiKey { + case Metadata: + var mt interface{} + var metadataResponse interface{} + if apiVersion >= 11 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV11{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV11{} + } else if apiVersion >= 10 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV10{}).Elem()) + mt = types[0] + 
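// Each version branch pairs a reflection-derived message type with a concrete + // struct of the matching wire format; decode below fills that struct in place. +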
metadataResponse = &MetadataResponseV10{} + } else if apiVersion >= 8 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV8{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV8{} + } else if apiVersion >= 7 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV7{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV7{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV5{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV5{} + } else if apiVersion >= 3 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV3{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV3{} + } else if apiVersion >= 2 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV2{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV2{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&MetadataResponseV1{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&MetadataResponseV0{}).Elem()) + mt = types[0] + metadataResponse = &MetadataResponseV0{} + } + mt.(messageType).decode(d, valueOf(metadataResponse)) + reqResPair.Response.Payload = metadataResponse + break + case ApiVersions: + var mt interface{} + var apiVersionsResponse interface{} + if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&ApiVersionsResponseV1{}).Elem()) + mt = types[0] + apiVersionsResponse = &ApiVersionsResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&ApiVersionsResponseV0{}).Elem()) + mt = types[0] + apiVersionsResponse = &ApiVersionsResponseV0{} + } + mt.(messageType).decode(d, valueOf(apiVersionsResponse)) + reqResPair.Response.Payload = apiVersionsResponse + break + case Produce: + var mt interface{} + var produceResponse interface{} + if apiVersion >= 8 { + types := makeTypes(reflect.TypeOf(&ProduceResponseV8{}).Elem()) + mt = types[0] + produceResponse = &ProduceResponseV8{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&ProduceResponseV5{}).Elem()) + mt = types[0] + produceResponse = &ProduceResponseV5{} + } else if apiVersion >= 2 { + types := makeTypes(reflect.TypeOf(&ProduceResponseV2{}).Elem()) + mt = types[0] + produceResponse = &ProduceResponseV2{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&ProduceResponseV1{}).Elem()) + mt = types[0] + produceResponse = &ProduceResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&ProduceResponseV0{}).Elem()) + mt = types[0] + produceResponse = &ProduceResponseV0{} + } + mt.(messageType).decode(d, valueOf(produceResponse)) + reqResPair.Response.Payload = produceResponse + break + case Fetch: + var mt interface{} + var fetchResponse interface{} + if apiVersion >= 11 { + types := makeTypes(reflect.TypeOf(&FetchResponseV11{}).Elem()) + mt = types[0] + fetchResponse = &FetchResponseV11{} + } else if apiVersion >= 7 { + types := makeTypes(reflect.TypeOf(&FetchResponseV7{}).Elem()) + mt = types[0] + fetchResponse = &FetchResponseV7{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&FetchResponseV5{}).Elem()) + mt = types[0] + fetchResponse = &FetchResponseV5{} + } else if apiVersion >= 4 { + types := makeTypes(reflect.TypeOf(&FetchResponseV4{}).Elem()) + mt = types[0] + fetchResponse = &FetchResponseV4{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&FetchResponseV1{}).Elem()) + mt = types[0] + fetchResponse = &FetchResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&FetchResponseV0{}).Elem()) + mt = types[0] + 
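// A version ladder like this falls back to the nearest lower wire format, + // e.g. a v2 or v3 Fetch response is decoded with the V1 layout above. +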
fetchResponse = &FetchResponseV0{} + } + mt.(messageType).decode(d, valueOf(fetchResponse)) + reqResPair.Response.Payload = fetchResponse + break + case ListOffsets: + var mt interface{} + var listOffsetsResponse interface{} + if apiVersion >= 4 { + types := makeTypes(reflect.TypeOf(&ListOffsetsResponseV4{}).Elem()) + mt = types[0] + listOffsetsResponse = &ListOffsetsResponseV4{} + } else if apiVersion >= 2 { + types := makeTypes(reflect.TypeOf(&ListOffsetsResponseV2{}).Elem()) + mt = types[0] + listOffsetsResponse = &ListOffsetsResponseV2{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&ListOffsetsResponseV1{}).Elem()) + mt = types[0] + listOffsetsResponse = &ListOffsetsResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&ListOffsetsResponseV0{}).Elem()) + mt = types[0] + listOffsetsResponse = &ListOffsetsResponseV0{} + } + mt.(messageType).decode(d, valueOf(listOffsetsResponse)) + reqResPair.Response.Payload = listOffsetsResponse + case CreateTopics: + var mt interface{} + var createTopicsResponse interface{} + if apiVersion >= 7 { + types := makeTypes(reflect.TypeOf(&CreateTopicsResponseV7{}).Elem()) + mt = types[0] + createTopicsResponse = &CreateTopicsResponseV7{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&CreateTopicsResponseV5{}).Elem()) + mt = types[0] + createTopicsResponse = &CreateTopicsResponseV5{} + } else if apiVersion >= 2 { + types := makeTypes(reflect.TypeOf(&CreateTopicsResponseV2{}).Elem()) + mt = types[0] + createTopicsResponse = &CreateTopicsResponseV2{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&CreateTopicsResponseV1{}).Elem()) + mt = types[0] + createTopicsResponse = &CreateTopicsResponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&CreateTopicsResponseV0{}).Elem()) + mt = types[0] + createTopicsResponse = &CreateTopicsResponseV0{} + } + mt.(messageType).decode(d, valueOf(createTopicsResponse)) + reqResPair.Response.Payload = createTopicsResponse + break + case DeleteTopics: + var mt interface{} + var deleteTopicsResponse interface{} + if apiVersion >= 6 { + types := makeTypes(reflect.TypeOf(&DeleteTopicsReponseV6{}).Elem()) + mt = types[0] + deleteTopicsResponse = &DeleteTopicsReponseV6{} + } else if apiVersion >= 5 { + types := makeTypes(reflect.TypeOf(&DeleteTopicsReponseV5{}).Elem()) + mt = types[0] + deleteTopicsResponse = &DeleteTopicsReponseV5{} + } else if apiVersion >= 1 { + types := makeTypes(reflect.TypeOf(&DeleteTopicsReponseV1{}).Elem()) + mt = types[0] + deleteTopicsResponse = &DeleteTopicsReponseV1{} + } else { + types := makeTypes(reflect.TypeOf(&DeleteTopicsReponseV0{}).Elem()) + mt = types[0] + deleteTopicsResponse = &DeleteTopicsReponseV0{} + } + mt.(messageType).decode(d, valueOf(deleteTopicsResponse)) + reqResPair.Response.Payload = deleteTopicsResponse + default: + return fmt.Errorf("(Response) Not implemented: %s", apiKey) + } + + connectionInfo := &api.ConnectionInfo{ + ClientIP: tcpID.SrcIP, + ClientPort: tcpID.SrcPort, + ServerIP: tcpID.DstIP, + ServerPort: tcpID.DstPort, + IsOutgoing: true, + } + + item := &api.OutputChannelItem{ + Protocol: _protocol, + Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + ConnectionInfo: connectionInfo, + Pair: &api.RequestResponsePair{ + Request: api.GenericMessage{ + IsRequest: true, + CaptureTime: time.Now(), + Payload: KafkaPayload{ + Data: &KafkaWrapper{ + Method: apiNames[apiKey], + Url: "", + Details: reqResPair.Request, + }, + }, + }, + Response: api.GenericMessage{ + IsRequest: false, + CaptureTime: 
time.Now(), + Payload: KafkaPayload{ + Data: &KafkaWrapper{ + Method: apiNames[apiKey], + Url: "", + Details: reqResPair.Response, + }, + }, + }, + }, + } + emitter.Emit(item) + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + err = fmt.Errorf("unsupported api key: %d", i) + return err + } + + t := &apiTypes[apiKey] + if t == nil { + err = fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + return err + } + + d.discardAll() + + return nil +} + +func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error { + apiKey := msg.ApiKey() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + return fmt.Errorf("unsupported api key: %d", i) + } + + t := &apiTypes[apiKey] + if t == nil { + return fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + } + + r := &t.responses[apiVersion-minVersion] + v := valueOf(msg) + b := newPageBuffer() + defer b.unref() + + e := &encoder{writer: b} + e.writeInt32(0) // placeholder for the response size + e.writeInt32(correlationID) + r.encode(e, v) + err := e.err + + if err == nil { + size := packUint32(uint32(b.Size()) - 4) + b.WriteAt(size[:], 0) + _, err = b.WriteTo(w) + } + + return err +} diff --git a/tap/extensions/kafka/structs.go b/tap/extensions/kafka/structs.go new file mode 100644 index 000000000..d9aa5c1cb --- /dev/null +++ b/tap/extensions/kafka/structs.go @@ -0,0 +1,1000 @@ +package main + +import ( + "time" +) + +type RequiredAcks int16 + +const ( + RequireNone RequiredAcks = 0 + RequireOne RequiredAcks = 1 + RequireAll RequiredAcks = -1 +) + +func (acks RequiredAcks) String() string { + switch acks { + case RequireNone: + return "none" + case RequireOne: + return "one" + case RequireAll: + return "all" + default: + return "unknown" + } +} + +type UUID struct { + TimeLow int32 + TimeMid int16 + TimeHiAndVersion int16 + ClockSeq int16 + NodePart1 int32 + NodePart2 int16 +} + +// Metadata Request (Version: 0) + +type MetadataRequestTopicV0 struct { + Name string +} + +type MetadataRequestV0 struct { + Topics []MetadataRequestTopicV0 +} + +// Metadata Request (Version: 4) + +type MetadataRequestV4 struct { + Topics []MetadataRequestTopicV0 + AllowAutoTopicCreation bool +} + +// Metadata Request (Version: 8) + +type MetadataRequestV8 struct { + Topics []MetadataRequestTopicV0 + AllowAutoTopicCreation bool + IncludeClusterAuthorizedOperations bool + IncludeTopicAuthorizedOperations bool +} + +// Metadata Request (Version: 10) + +type MetadataRequestTopicV10 struct { + Name string + UUID UUID +} + +type MetadataRequestV10 struct { + Topics []MetadataRequestTopicV10 + AllowAutoTopicCreation bool + IncludeClusterAuthorizedOperations bool + IncludeTopicAuthorizedOperations bool +} + +// Metadata Request (Version: 11) + +type MetadataRequestV11 struct { + Topics []MetadataRequestTopicV10 + AllowAutoTopicCreation bool + IncludeTopicAuthorizedOperations bool +} + +// Metadata Response (Version: 0) + +type BrokerV0 struct { + NodeId int32 + Host string + Port int32 +} + +type PartitionsV0 struct { + ErrorCode int16 + PartitionIndex int32 + LeaderId int32 + ReplicaNodes int32 + IsrNodes int32 +} + +type TopicV0 struct { + ErrorCode int16 + Name string + Partitions []PartitionsV0 +} + +type MetadataResponseV0 struct { + Brokers []BrokerV0 + Topics []TopicV0 +} + +// Metadata Response 
(Version: 1) + +type BrokerV1 struct { + NodeId int32 + Host string + Port int32 + Rack string +} + +type TopicV1 struct { + ErrorCode int16 + Name string + IsInternal bool + Partitions []PartitionsV0 +} + +type MetadataResponseV1 struct { + Brokers []BrokerV1 + ControllerID int32 + Topics []TopicV1 +} + +// Metadata Response (Version: 2) + +type MetadataResponseV2 struct { + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV1 +} + +// Metadata Response (Version: 3) + +type MetadataResponseV3 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV1 +} + +// Metadata Response (Version: 5) + +type PartitionsV5 struct { + ErrorCode int16 + PartitionIndex int32 + LeaderId int32 + ReplicaNodes int32 + IsrNodes int32 + OfflineReplicas int32 +} + +type TopicV5 struct { + ErrorCode int16 + Name string + IsInternal bool + Partitions []PartitionsV5 +} + +type MetadataResponseV5 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV5 +} + +// Metadata Response (Version: 7) + +type PartitionsV7 struct { + ErrorCode int16 + PartitionIndex int32 + LeaderId int32 + LeaderEpoch int32 + ReplicaNodes int32 + IsrNodes int32 + OfflineReplicas int32 +} + +type TopicV7 struct { + ErrorCode int16 + Name string + IsInternal bool + Partitions []PartitionsV7 +} + +type MetadataResponseV7 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV7 +} + +// Metadata Response (Version: 8) + +type TopicV8 struct { + ErrorCode int16 + Name string + IsInternal bool + Partitions []PartitionsV7 + TopicAuthorizedOperations int32 +} + +type MetadataResponseV8 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV8 + ClusterAuthorizedOperations int32 +} + +// Metadata Response (Version: 10) + +type TopicV10 struct { + ErrorCode int16 + Name string + TopicID UUID + IsInternal bool + Partitions []PartitionsV7 + TopicAuthorizedOperations int32 +} + +type MetadataResponseV10 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV10 + ClusterAuthorizedOperations int32 +} + +// Metadata Response (Version: 11) + +type MetadataResponseV11 struct { + ThrottleTimeMs int32 + Brokers []BrokerV1 + ClusterID string + ControllerID int32 + Topics []TopicV10 +} + +// ApiVersions Request (Version: 0) + +type ApiVersionsRequestV0 struct{} + +// ApiVersions Request (Version: 3) + +type ApiVersionsRequestV3 struct { + ClientSoftwareName string + ClientSoftwareVersion string +} + +// ApiVersions Response (Version: 0) + +type ApiVersionsResponseApiKey struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +type ApiVersionsResponseV0 struct { + ErrorCode int16 + ApiKeys []ApiVersionsResponseApiKey +} + +// ApiVersions Response (Version: 1) + +type ApiVersionsResponseV1 struct { + ErrorCode int16 + ApiKeys []ApiVersionsResponseApiKey // FIXME: `confluent-kafka-python` causes memory leak + ThrottleTimeMs int32 +} + +// Produce Request (Version: 0) + +// Message is a kafka message type +type MessageV0 struct { + Codec int8 // codec used to compress the message contents + CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 
0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedSize int // used for computing the compression ratio metrics +} + +// MessageBlock represents a part of request with message +type MessageBlock struct { + Offset int64 + Msg *MessageV0 +} + +// MessageSet is a replacement for RecordBatch in older versions +type MessageSet struct { + PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + OverflowMessage bool // whether the set on the wire contained an overflow message + Messages []*MessageBlock +} + +type RecordHeader struct { + HeaderKeyLength int8 + HeaderKey string + HeaderValueLength int8 + Value string +} + +// Record is kafka record type +type RecordV0 struct { + Unknown int8 + Attributes int8 + TimestampDelta int8 + OffsetDelta int8 + KeyLength int8 + Key string + ValueLen int8 + Value string + Headers []RecordHeader +} + +// RecordBatch are records from one kafka request +type RecordBatch struct { + BaseOffset int64 + BatchLength int32 + PartitionLeaderEpoch int32 + Magic int8 + Crc int32 + Attributes int16 + LastOffsetDelta int32 + FirstTimestamp int64 + MaxTimestamp int64 + ProducerId int64 + ProducerEpoch int16 + BaseSequence int32 + Record []RecordV0 +} + +type Records struct { + RecordBatch RecordBatch + // TODO: Implement `MessageSet` + // MessageSet MessageSet +} + +type PartitionData struct { + Index int32 + Unknown int32 + Records Records +} + +type Partitions struct { + Length int32 + PartitionData PartitionData +} + +type TopicData struct { + Topic string + Partitions Partitions +} + +type ProduceRequestV0 struct { + RequiredAcks RequiredAcks + Timeout int32 + TopicData []TopicData +} + +// Produce Request (Version: 3) + +type ProduceRequestV3 struct { + TransactionalID string + RequiredAcks RequiredAcks + Timeout int32 + TopicData []TopicData +} + +// Produce Response (Version: 0) + +type PartitionResponseV0 struct { + Index int32 + ErrorCode int16 + BaseOffset int64 +} + +type ResponseV0 struct { + Name string + PartitionResponses []PartitionResponseV0 +} + +type ProduceResponseV0 struct { + Responses []ResponseV0 +} + +// Produce Response (Version: 1) + +type ProduceResponseV1 struct { + Responses []ResponseV0 + ThrottleTimeMs int32 +} + +// Produce Response (Version: 2) + +type PartitionResponseV2 struct { + Index int32 + ErrorCode int16 + BaseOffset int64 + LogAppendTimeMs int64 +} + +type ResponseV2 struct { + Name string + PartitionResponses []PartitionResponseV2 +} + +type ProduceResponseV2 struct { + Responses []ResponseV2 + ThrottleTimeMs int32 +} + +// Produce Response (Version: 5) + +type PartitionResponseV5 struct { + Index int32 + ErrorCode int16 + BaseOffset int64 + LogAppendTimeMs int64 + LogStartOffset int64 +} + +type ResponseV5 struct { + Name string + PartitionResponses []PartitionResponseV5 +} + +type ProduceResponseV5 struct { + Responses []ResponseV5 + ThrottleTimeMs int32 +} + +// Produce Response (Version: 8) + +type RecordErrors struct { + BatchIndex int32 + BatchIndexErrorMessage string +} + +type PartitionResponseV8 struct { + Index int32 + ErrorCode int16 + BaseOffset int64 + LogAppendTimeMs int64 + LogStartOffset int64 + RecordErrors RecordErrors + ErrorMessage string +} + +type ResponseV8 struct { + Name string + PartitionResponses []PartitionResponseV8 +} + +type ProduceResponseV8 struct { + Responses []ResponseV8 + ThrottleTimeMs int32 +} + +// Fetch Request (Version: 0) + +type FetchPartitionV0 struct { + Partition int32 + FetchOffset int64 + 
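// PartitionMaxBytes caps how many bytes the broker may return for this partition. +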
PartitionMaxBytes int32 +} + +type FetchTopicV0 struct { + Topic string + Partitions []FetchPartitionV0 +} + +type FetchRequestV0 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + Topics []FetchTopicV0 +} + +// Fetch Request (Version: 3) + +type FetchRequestV3 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + Topics []FetchTopicV0 +} + +// Fetch Request (Version: 4) + +type FetchRequestV4 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + IsolationLevel int8 + Topics []FetchTopicV0 +} + +// Fetch Request (Version: 5) + +type FetchPartitionV5 struct { + Partition int32 + FetchOffset int64 + LogStartOffset int64 + PartitionMaxBytes int32 +} + +type FetchTopicV5 struct { + Topic string + Partitions []FetchPartitionV5 +} + +type FetchRequestV5 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + IsolationLevel int8 + Topics []FetchTopicV5 +} + +// Fetch Request (Version: 7) + +type ForgottenTopicsDataV7 struct { + Topic string + Partitions []int32 +} + +type FetchRequestV7 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + IsolationLevel int8 + SessionId int32 + SessionEpoch int32 + Topics []FetchTopicV5 + ForgottenTopicsData ForgottenTopicsDataV7 +} + +// Fetch Request (Version: 9) + +type FetchPartitionV9 struct { + Partition int32 + CurrentLeaderEpoch int32 + FetchOffset int64 + LogStartOffset int64 + PartitionMaxBytes int32 +} + +type FetchTopicV9 struct { + Topic string + Partitions []FetchPartitionV9 +} + +type FetchRequestV9 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + IsolationLevel int8 + SessionId int32 + SessionEpoch int32 + Topics []FetchTopicV9 + ForgottenTopicsData ForgottenTopicsDataV7 +} + +// Fetch Request (Version: 11) + +type FetchRequestV11 struct { + ReplicaId int32 + MaxWaitMs int32 + MinBytes int32 + MaxBytes int32 + IsolationLevel int8 + SessionId int32 + SessionEpoch int32 + Topics []FetchTopicV9 + ForgottenTopicsData ForgottenTopicsDataV7 + RackId string +} + +// Fetch Response (Version: 0) + +type PartitionResponseFetchV0 struct { + Partition int32 + ErrorCode int16 + HighWatermark int64 + RecordSet Records +} + +type ResponseFetchV0 struct { + Topic string + PartitionResponses []PartitionResponseFetchV0 +} + +type FetchResponseV0 struct { + Responses []ResponseFetchV0 +} + +// Fetch Response (Version: 1) + +type FetchResponseV1 struct { + ThrottleTimeMs int32 + Responses []ResponseFetchV0 +} + +// Fetch Response (Version: 4) + +type AbortedTransactionsV4 struct { + ProducerId int32 + FirstOffset int32 +} + +type PartitionResponseFetchV4 struct { + Partition int32 + ErrorCode int16 + HighWatermark int64 + LastStableOffset int64 + AbortedTransactions AbortedTransactionsV4 + RecordSet Records +} + +type ResponseFetchV4 struct { + Topic string + PartitionResponses []PartitionResponseFetchV4 +} + +type FetchResponseV4 struct { + ThrottleTimeMs int32 + Responses []ResponseFetchV4 +} + +// Fetch Response (Version: 5) + +type PartitionResponseFetchV5 struct { + Partition int32 + ErrorCode int16 + HighWatermark int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions AbortedTransactionsV4 + RecordSet Records +} + +type ResponseFetchV5 struct { + Topic string + PartitionResponses []PartitionResponseFetchV5 +} + +type FetchResponseV5 struct { + ThrottleTimeMs int32 + Responses []ResponseFetchV5 +} + +// Fetch Response (Version: 7) + +type FetchResponseV7 struct { + ThrottleTimeMs int32 + 
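// ErrorCode and SessionId arrived in Fetch v7 along with incremental fetch + // sessions (KIP-227). +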
ErrorCode int16 + SessionId int32 + Responses []ResponseFetchV5 +} + +// Fetch Response (Version: 11) + +type PartitionResponseFetchV11 struct { + Partition int32 + ErrorCode int16 + HighWatermark int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions AbortedTransactionsV4 + PreferredReadReplica int32 + RecordSet Records +} + +type ResponseFetchV11 struct { + Topic string + PartitionResponses []PartitionResponseFetchV11 +} + +type FetchResponseV11 struct { + ThrottleTimeMs int32 + ErrorCode int16 + SessionId int32 + Responses []ResponseFetchV11 +} + +// ListOffsets Request (Version: 0) + +type ListOffsetsRequestPartitionV0 struct { + PartitionIndex int32 + Timestamp int64 + MaxNumOffsets int32 +} + +type ListOffsetsRequestTopicV0 struct { + Name string + Partitions []ListOffsetsRequestPartitionV0 +} + +type ListOffsetsRequestV0 struct { + ReplicaId int32 + Topics []ListOffsetsRequestTopicV0 +} + +// ListOffsets Request (Version: 1) + +type ListOffsetsRequestPartitionV1 struct { + PartitionIndex int32 + Timestamp int64 +} + +type ListOffsetsRequestTopicV1 struct { + Name string + Partitions []ListOffsetsRequestPartitionV1 +} + +type ListOffsetsRequestV1 struct { + ReplicaId int32 + Topics []ListOffsetsRequestTopicV1 +} + +// ListOffsets Request (Version: 2) + +type ListOffsetsRequestV2 struct { + ReplicaId int32 + IsolationLevel int8 + Topics []ListOffsetsRequestTopicV1 +} + +// ListOffsets Request (Version: 4) + +type ListOffsetsRequestPartitionV4 struct { + PartitionIndex int32 + CurrentLeaderEpoch int32 + Timestamp int64 +} + +type ListOffsetsRequestTopicV4 struct { + Name string + Partitions []ListOffsetsRequestPartitionV4 +} + +type ListOffsetsRequestV4 struct { + ReplicaId int32 + Topics []ListOffsetsRequestTopicV4 +} + +// ListOffsets Response (Version: 0) + +type ListOffsetsResponsePartitionV0 struct { + PartitionIndex int32 + ErrorCode int16 + OldStyleOffsets int64 +} + +type ListOffsetsResponseTopicV0 struct { + Name string + Partitions []ListOffsetsResponsePartitionV0 +} + +type ListOffsetsResponseV0 struct { + Topics []ListOffsetsResponseTopicV0 +} + +// ListOffsets Response (Version: 1) + +type ListOffsetsResponsePartitionV1 struct { + PartitionIndex int32 + ErrorCode int16 + Timestamp int64 + Offset int64 +} + +type ListOffsetsResponseTopicV1 struct { + Name string + Partitions []ListOffsetsResponsePartitionV1 +} + +type ListOffsetsResponseV1 struct { + Topics []ListOffsetsResponseTopicV1 +} + +// ListOffsets Response (Version: 2) + +type ListOffsetsResponseV2 struct { + ThrottleTimeMs int32 + Topics []ListOffsetsResponseTopicV1 +} + +// ListOffsets Response (Version: 4) + +type ListOffsetsResponsePartitionV4 struct { + PartitionIndex int32 + ErrorCode int16 + Timestamp int64 + Offset int64 + LeaderEpoch int32 +} + +type ListOffsetsResponseTopicV4 struct { + Name string + Partitions []ListOffsetsResponsePartitionV4 +} + +type ListOffsetsResponseV4 struct { + Topics []ListOffsetsResponseTopicV4 +} + +// CreateTopics Request (Version: 0) + +type AssignmentsV0 struct { + PartitionIndex int32 + BrokerIds []int32 +} + +type CreateTopicsRequestConfigsV0 struct { + Name string + Value string +} + +type CreateTopicsRequestTopicV0 struct { + Name string + NumPartitions int32 + ReplicationFactor int16 + Assignments []AssignmentsV0 + Configs []CreateTopicsRequestConfigsV0 +} + +type CreateTopicsRequestV0 struct { + Topics []CreateTopicsRequestTopicV0 + TimeoutMs int32 +} + +// CreateTopics Request (Version: 1) + +type CreateTopicsRequestV1 struct { + Topics 
[]CreateTopicsRequestTopicV0 + TimeoutMs int32 + ValidateOnly bool +} + +// CreateTopics Response (Version: 0) + +type CreateTopicsResponseTopicV0 struct { + Name string + ErrorCode int16 +} + +type CreateTopicsResponseV0 struct { + Topics []CreateTopicsResponseTopicV0 +} + +// CreateTopics Response (Version: 1) + +type CreateTopicsResponseTopicV1 struct { + Name string + ErrorCode int16 + ErrorMessage string +} + +type CreateTopicsResponseV1 struct { + Topics []CreateTopicsResponseTopicV1 +} + +// CreateTopics Response (Version: 2) + +type CreateTopicsResponseV2 struct { + ThrottleTimeMs int32 + Topics []CreateTopicsResponseTopicV1 +} + +// CreateTopics Response (Version: 5) + +type CreateTopicsResponseConfigsV5 struct { + Name string + Value string + ReadOnly bool + ConfigSource int8 + IsSensitive bool +} + +type CreateTopicsResponseTopicV5 struct { + Name string + ErrorCode int16 + ErrorMessage string + NumPartitions int32 + ReplicationFactor int16 + Configs []CreateTopicsResponseConfigsV5 +} + +type CreateTopicsResponseV5 struct { + ThrottleTimeMs int32 + Topics []CreateTopicsResponseTopicV5 +} + +// CreateTopics Response (Version: 7) + +type CreateTopicsResponseTopicV7 struct { + Name string + TopicID UUID + ErrorCode int16 + ErrorMessage string + NumPartitions int32 + ReplicationFactor int16 + Configs []CreateTopicsResponseConfigsV5 +} + +type CreateTopicsResponseV7 struct { + ThrottleTimeMs int32 + Topics []CreateTopicsResponseTopicV7 +} + +// DeleteTopics Request (Version: 0) + +type DeleteTopicsRequestV0 struct { + TopicNames []string + TimeoutMs int32 +} + +// DeleteTopics Request (Version: 6) + +type DeleteTopicsRequestTopicV6 struct { + Name string + UUID UUID +} + +type DeleteTopicsRequestV6 struct { + Topics []DeleteTopicsRequestTopicV6 + TimeoutMs int32 +} + +// DeleteTopics Response (Version: 0) + +type DeleteTopicsReponseResponseV0 struct { + Name string + ErrorCode int16 +} + +type DeleteTopicsReponseV0 struct { + Responses []DeleteTopicsReponseResponseV0 +} + +// DeleteTopics Response (Version: 1) + +type DeleteTopicsReponseV1 struct { + ThrottleTimeMs int32 + Responses []DeleteTopicsReponseResponseV0 +} + +// DeleteTopics Response (Version: 5) + +type DeleteTopicsReponseResponseV5 struct { + Name string + ErrorCode int16 + ErrorMessage string +} + +type DeleteTopicsReponseV5 struct { + ThrottleTimeMs int32 + Responses []DeleteTopicsReponseResponseV5 +} + +// DeleteTopics Response (Version: 6) + +type DeleteTopicsReponseResponseV6 struct { + Name string + TopicID UUID + ErrorCode int16 + ErrorMessage string +} + +type DeleteTopicsReponseV6 struct { + ThrottleTimeMs int32 + Responses []DeleteTopicsReponseResponseV6 +} diff --git a/tap/go.mod b/tap/go.mod index 48057e605..87ce42cf3 100644 --- a/tap/go.mod +++ b/tap/go.mod @@ -3,10 +3,15 @@ module github.com/up9inc/mizu/tap go 1.16 require ( + github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 // indirect github.com/google/gopacket v1.1.19 - github.com/google/martian v2.1.0+incompatible - github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7 - golang.org/x/net v0.0.0-20210421230115-4e50805a0758 - github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 + github.com/up9inc/mizu/tap/api v0.0.0 + golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 + golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d + golang.org/x/text v0.3.5 + golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a ) + +replace github.com/up9inc/mizu/tap/api v0.0.0 => ./api diff --git a/tap/go.sum b/tap/go.sum index c7e7a2c54..83a0c8f74 100644 --- a/tap/go.sum +++ b/tap/go.sum @@ -2,30 +2,44 @@ github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 h1:NJOOlc6ZJjix github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4/go.mod h1:DQPxZS994Ld1Y8uwnJT+dRL04XPD0cElP/pHH/zEBHM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 h1:fa50YL1pzKW+1SsBnJDOHppJN9stOEwS+CRWyUtyYGU= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7 h1:jkvpcEatpwuMF5O5LVxTnehj6YZ/aEZN4NWD/Xml4pI= github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7/go.mod h1:KTrHyWpO1sevuXPZwyeZc72ddWRFqNSKDFl7uVWKpg0= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758 h1:aEpZnXcAmXkd6AvLb2OPt+EN1Zu/8Ne3pCqPjja5PXY= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe h1:WdX7u8s3yOigWAhHEaDl8r9G+4XwFQEQFtBMYyN+kXQ= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tap/har_writer.go b/tap/har_writer.go deleted file mode 100644 index a20dbfb99..000000000 --- a/tap/har_writer.go +++ /dev/null @@ -1,274 +0,0 @@ -package tap - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/google/martian/har" -) - -const readPermission = 0644 -const harFilenameSuffix = ".har" -const tempFilenameSuffix = ".har.tmp" - -type PairChanItem struct { - Request *http.Request - RequestTime time.Time - Response *http.Response - ResponseTime time.Time - RequestSenderIp string - ConnectionInfo *ConnectionInfo -} - -func openNewHarFile(filename string) *HarFile { - file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, readPermission) - if err != nil { - log.Panicf("Failed to open output file: %s (%v,%+v)", err, err, err) - } - - harFile := HarFile{file: file, entryCount: 0} - harFile.writeHeader() - - return &harFile -} - -type HarFile struct { - file *os.File - entryCount int -} - -func NewEntry(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time) (*har.Entry, error) { - harRequest, err := har.NewRequest(request, false) - if err != nil { - SilentError("convert-request-to-har", "Failed converting 
request to HAR %s (%v,%+v)", err, err, err) - return nil, errors.New("Failed converting request to HAR") - } - - // For requests with multipart/form-data or application/x-www-form-urlencoded Content-Type, - // martian/har will parse the request body and place the parameters in harRequest.PostData.Params - // instead of harRequest.PostData.Text (as the HAR spec requires it). - // Mizu currently only looks at PostData.Text. Therefore, instead of letting martian/har set the content of - // PostData, always copy the request body to PostData.Text. - if (request.ContentLength > 0) { - reqBody, err := ioutil.ReadAll(request.Body) - if err != nil { - SilentError("read-request-body", "Failed converting request to HAR %s (%v,%+v)", err, err, err) - return nil, errors.New("Failed reading request body") - } - request.Body = ioutil.NopCloser(bytes.NewReader(reqBody)) - harRequest.PostData.Text = string(reqBody) - } - - harResponse, err := har.NewResponse(response, true) - if err != nil { - SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)", err, err, err) - return nil, errors.New("Failed converting response to HAR") - } - - if harRequest.PostData != nil && strings.HasPrefix(harRequest.PostData.MimeType, "application/grpc") { - // Force HTTP/2 gRPC into HAR template - - harRequest.URL = fmt.Sprintf("%s://%s%s", request.Header.Get(":scheme"), request.Header.Get(":authority"), request.Header.Get(":path")) - - status, err := strconv.Atoi(response.Header.Get(":status")) - if err != nil { - SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)", err, err, err) - return nil, errors.New("Failed converting response status to int for HAR") - } - harResponse.Status = status - } else { - // Martian copies http.Request.URL.String() to har.Request.URL, which usually contains the path. - // However, according to the HAR spec, the URL field needs to be the absolute URL. - var scheme string - if request.URL.Scheme != "" { - scheme = request.URL.Scheme - } else { - scheme = "http" - } - harRequest.URL = fmt.Sprintf("%s://%s%s", scheme, request.Host, request.URL) - } - - totalTime := responseTime.Sub(requestTime).Round(time.Millisecond).Milliseconds() - if totalTime < 1 { - totalTime = 1 - } - - harEntry := har.Entry{ - StartedDateTime: time.Now().UTC(), - Time: totalTime, - Request: harRequest, - Response: harResponse, - Cache: &har.Cache{}, - Timings: &har.Timings{ - Send: -1, - Wait: -1, - Receive: totalTime, - }, - } - - return &harEntry, nil -} - -func (f *HarFile) WriteEntry(harEntry *har.Entry) { - harEntryJson, err := json.Marshal(harEntry) - if err != nil { - SilentError("har-entry-marshal", "Failed converting har entry object to JSON%s (%v,%+v)", err, err, err) - return - } - - var separator string - if f.GetEntryCount() > 0 { - separator = "," - } else { - separator = "" - } - - harEntryString := append([]byte(separator), harEntryJson...) 
- - if _, err := f.file.Write(harEntryString); err != nil { - log.Panicf("Failed to write to output file: %s (%v,%+v)", err, err, err) - } - - f.entryCount++ -} - -func (f *HarFile) GetEntryCount() int { - return f.entryCount -} - -func (f *HarFile) Close() { - f.writeTrailer() - - err := f.file.Close() - if err != nil { - log.Panicf("Failed to close output file: %s (%v,%+v)", err, err, err) - } -} - -func (f *HarFile) writeHeader() { - header := []byte(`{"log": {"version": "1.2", "creator": {"name": "Mizu", "version": "0.0.1"}, "entries": [`) - if _, err := f.file.Write(header); err != nil { - log.Panicf("Failed to write header to output file: %s (%v,%+v)", err, err, err) - } -} - -func (f *HarFile) writeTrailer() { - trailer := []byte("]}}") - if _, err := f.file.Write(trailer); err != nil { - log.Panicf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err) - } -} - -func NewHarWriter(outputDir string, maxEntries int) *HarWriter { - return &HarWriter{ - OutputDirPath: outputDir, - MaxEntries: maxEntries, - PairChan: make(chan *PairChanItem), - OutChan: make(chan *OutputChannelItem, 1000), - currentFile: nil, - done: make(chan bool), - } -} - -type OutputChannelItem struct { - HarEntry *har.Entry - ConnectionInfo *ConnectionInfo - ValidationRulesChecker string -} - -type HarWriter struct { - OutputDirPath string - MaxEntries int - PairChan chan *PairChanItem - OutChan chan *OutputChannelItem - currentFile *HarFile - done chan bool -} - -func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, connectionInfo *ConnectionInfo) { - hw.PairChan <- &PairChanItem{ - Request: request, - RequestTime: requestTime, - Response: response, - ResponseTime: responseTime, - ConnectionInfo: connectionInfo, - } -} - -func (hw *HarWriter) Start() { - if hw.OutputDirPath != "" { - if err := os.MkdirAll(hw.OutputDirPath, os.ModePerm); err != nil { - log.Panicf("Failed to create output directory: %s (%v,%+v)", err, err, err) - } - } - - go func() { - for pair := range hw.PairChan { - harEntry, err := NewEntry(pair.Request, pair.RequestTime, pair.Response, pair.ResponseTime) - if err != nil { - continue - } - - if hw.OutputDirPath != "" { - if hw.currentFile == nil { - hw.openNewFile() - } - - hw.currentFile.WriteEntry(harEntry) - - if hw.currentFile.GetEntryCount() >= hw.MaxEntries { - hw.closeFile() - } - } else { - hw.OutChan <- &OutputChannelItem{ - HarEntry: harEntry, - ConnectionInfo: pair.ConnectionInfo, - } - } - } - - if hw.currentFile != nil { - hw.closeFile() - } - hw.done <- true - }() -} - -func (hw *HarWriter) Stop() { - close(hw.PairChan) - <-hw.done - close(hw.OutChan) -} - -func (hw *HarWriter) openNewFile() { - filename := buildFilename(hw.OutputDirPath, time.Now(), tempFilenameSuffix) - hw.currentFile = openNewHarFile(filename) -} - -func (hw *HarWriter) closeFile() { - hw.currentFile.Close() - tmpFilename := hw.currentFile.file.Name() - hw.currentFile = nil - - filename := buildFilename(hw.OutputDirPath, time.Now(), harFilenameSuffix) - err := os.Rename(tmpFilename, filename) - if err != nil { - SilentError("Rename-file", "cannot rename file: %s (%v,%+v)", err, err, err) - } -} - -func buildFilename(dir string, t time.Time, suffix string) string { - // (epoch time in nanoseconds)__(YYYY_Month_DD__hh-mm-ss).har - filename := fmt.Sprintf("%d__%s%s", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"), suffix) - return filepath.Join(dir, filename) -} diff --git a/tap/http_matcher.go b/tap/http_matcher.go deleted 
file mode 100644 index 5832cf3b1..000000000 --- a/tap/http_matcher.go +++ /dev/null @@ -1,122 +0,0 @@ -package tap - -import ( - "fmt" - "net/http" - "strings" - "time" - - "github.com/orcaman/concurrent-map" -) - -type requestResponsePair struct { - Request httpMessage `json:"request"` - Response httpMessage `json:"response"` -} - -type httpMessage struct { - isRequest bool - captureTime time.Time - orig interface{} -} - - -// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port} -type requestResponseMatcher struct { - openMessagesMap cmap.ConcurrentMap - -} - -func createResponseRequestMatcher() requestResponseMatcher { - newMatcher := &requestResponseMatcher{openMessagesMap: cmap.New()} - return *newMatcher -} - -func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time) *requestResponsePair { - split := splitIdent(ident) - key := genKey(split) - - requestHTTPMessage := httpMessage{ - isRequest: true, - captureTime: captureTime, - orig: request, - } - - if response, found := matcher.openMessagesMap.Pop(key); found { - // Type assertion always succeeds because all of the map's values are of httpMessage type - responseHTTPMessage := response.(*httpMessage) - if responseHTTPMessage.isRequest { - SilentError("Request-Duplicate", "Got duplicate request with same identifier") - return nil - } - Trace("Matched open Response for %s", key) - return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage) - } - - matcher.openMessagesMap.Set(key, &requestHTTPMessage) - Trace("Registered open Request for %s", key) - return nil -} - -func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time) *requestResponsePair { - split := splitIdent(ident) - key := genKey(split) - - responseHTTPMessage := httpMessage{ - isRequest: false, - captureTime: captureTime, - orig: response, - } - - if request, found := matcher.openMessagesMap.Pop(key); found { - // Type assertion always succeeds because all of the map's values are of httpMessage type - requestHTTPMessage := request.(*httpMessage) - if !requestHTTPMessage.isRequest { - SilentError("Response-Duplicate", "Got duplicate response with same identifier") - return nil - } - Trace("Matched open Request for %s", key) - return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage) - } - - matcher.openMessagesMap.Set(key, &responseHTTPMessage) - Trace("Registered open Response for %s", key) - return nil -} - -func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) *requestResponsePair { - return &requestResponsePair{ - Request: *requestHTTPMessage, - Response: *responseHTTPMessage, - } -} - -func splitIdent(ident string) []string { - ident = strings.Replace(ident, "->", " ", -1) - return strings.Split(ident, " ") -} - -func genKey(split []string) string { - key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4]) - return key -} - -func (matcher *requestResponseMatcher) deleteOlderThan(t time.Time) int { - keysToPop := make([]string, 0) - for item := range matcher.openMessagesMap.IterBuffered() { - // Map only contains values of type httpMessage - message, _ := item.Val.(*httpMessage) - - if message.captureTime.Before(t) { - keysToPop = append(keysToPop, item.Key) - } - } - - numDeleted := len(keysToPop) - - for _, key := range keysToPop { - _, _ = matcher.openMessagesMap.Pop(key) - } - - return numDeleted -} diff --git a/tap/http_reader.go 
b/tap/http_reader.go deleted file mode 100644 index be6652e97..000000000 --- a/tap/http_reader.go +++ /dev/null @@ -1,305 +0,0 @@ -package tap - -import ( - "bufio" - "bytes" - "encoding/hex" - "fmt" - "github.com/bradleyfalzon/tlsx" - "io" - "io/ioutil" - "net/http" - "strconv" - "sync" - "time" -) - -const checkTLSPacketAmount = 100 - -type httpReaderDataMsg struct { - bytes []byte - timestamp time.Time -} - -type tcpID struct { - srcIP string - dstIP string - srcPort string - dstPort string -} - -type ConnectionInfo struct { - ClientIP string - ClientPort string - ServerIP string - ServerPort string - IsOutgoing bool -} - -func (tid *tcpID) String() string { - return fmt.Sprintf("%s->%s %s->%s", tid.srcIP, tid.dstIP, tid.srcPort, tid.dstPort) -} - -/* httpReader gets reads from a channel of bytes of tcp payload, and parses it into HTTP/1 requests and responses. - * The payload is written to the channel by a tcpStream object that is dedicated to one tcp connection. - * An httpReader object is unidirectional: it parses either a client stream or a server stream. - * Implements io.Reader interface (Read) - */ -type httpReader struct { - ident string - tcpID tcpID - isClient bool - isHTTP2 bool - isOutgoing bool - msgQueue chan httpReaderDataMsg // Channel of captured reassembled tcp payload - data []byte - captureTime time.Time - hexdump bool - parent *tcpStream - grpcAssembler GrpcAssembler - messageCount uint - harWriter *HarWriter - packetsSeen uint - outboundLinkWriter *OutboundLinkWriter -} - -func (h *httpReader) Read(p []byte) (int, error) { - var msg httpReaderDataMsg - - ok := true - for ok && len(h.data) == 0 { - msg, ok = <-h.msgQueue - h.data = msg.bytes - - h.captureTime = msg.timestamp - if len(h.data) > 0 { - h.packetsSeen += 1 - } - if h.packetsSeen < checkTLSPacketAmount && len(msg.bytes) > 5 { // packets with less than 5 bytes cause tlsx to panic - clientHello := tlsx.ClientHello{} - err := clientHello.Unmarshall(msg.bytes) - if err == nil { - statsTracker.incTlsConnectionsCount() - Debug("Detected TLS client hello with SNI %s\n", clientHello.SNI) - numericPort, _ := strconv.Atoi(h.tcpID.dstPort) - h.outboundLinkWriter.WriteOutboundLink(h.tcpID.srcIP, h.tcpID.dstIP, numericPort, clientHello.SNI, TLSProtocol) - } - } - } - if !ok || len(h.data) == 0 { - return 0, io.EOF - } - - l := copy(p, h.data) - h.data = h.data[l:] - return l, nil -} - -func (h *httpReader) run(wg *sync.WaitGroup) { - defer wg.Done() - b := bufio.NewReader(h) - - if isHTTP2, err := checkIsHTTP2Connection(b, h.isClient); err != nil { - SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)", h.ident, err, err, err) - // Do something? 
- } else { - h.isHTTP2 = isHTTP2 - } - - if h.isHTTP2 { - err := prepareHTTP2Connection(b, h.isClient) - if err != nil { - SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)", h.ident, err, err, err) - } - h.grpcAssembler = createGrpcAssembler(b) - } - - for true { - if h.isHTTP2 { - err := h.handleHTTP2Stream() - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } else if err != nil { - SilentError("HTTP/2", "stream %s error: %s (%v,%+v)", h.ident, err, err, err) - continue - } - } else if h.isClient { - err := h.handleHTTP1ClientStream(b) - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } else if err != nil { - SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)", h.ident, err, err, err) - continue - } - } else { - err := h.handleHTTP1ServerStream(b) - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } else if err != nil { - SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)", h.ident, err, err, err) - continue - } - } - } -} - -func (h *httpReader) handleHTTP2Stream() error { - streamID, messageHTTP1, err := h.grpcAssembler.readMessage() - h.messageCount++ - if err != nil { - return err - } - - var reqResPair *requestResponsePair - var connectionInfo *ConnectionInfo - - switch messageHTTP1 := messageHTTP1.(type) { - case http.Request: - ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, streamID) - connectionInfo = &ConnectionInfo{ - ClientIP: h.tcpID.srcIP, - ClientPort: h.tcpID.srcPort, - ServerIP: h.tcpID.dstIP, - ServerPort: h.tcpID.dstPort, - IsOutgoing: h.isOutgoing, - } - reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime) - case http.Response: - ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, streamID) - connectionInfo = &ConnectionInfo{ - ClientIP: h.tcpID.dstIP, - ClientPort: h.tcpID.dstPort, - ServerIP: h.tcpID.srcIP, - ServerPort: h.tcpID.srcPort, - IsOutgoing: h.isOutgoing, - } - reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime) - } - - if reqResPair != nil { - statsTracker.incMatchedPairs() - - if h.harWriter != nil { - h.harWriter.WritePair( - reqResPair.Request.orig.(*http.Request), - reqResPair.Request.captureTime, - reqResPair.Response.orig.(*http.Response), - reqResPair.Response.captureTime, - connectionInfo, - ) - } - } - - return nil -} - -func (h *httpReader) handleHTTP1ClientStream(b *bufio.Reader) error { - req, err := http.ReadRequest(b) - h.messageCount++ - if err != nil { - return err - } - body, err := ioutil.ReadAll(req.Body) - req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind - s := len(body) - if err != nil { - SilentError("HTTP-request-body", "stream %s Got body err: %s", h.ident, err) - } else if h.hexdump { - Debug("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body)) - } - if err := req.Body.Close(); err != nil { - SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s", h.ident, err) - } - encoding := req.Header["Content-Encoding"] - Debug("HTTP/1 Request: %s %s %s (Body:%d) -> %s", h.ident, req.Method, req.URL, s, encoding) - - ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, h.messageCount) - reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime) - if reqResPair != nil { - statsTracker.incMatchedPairs() - - if h.harWriter != nil { - h.harWriter.WritePair( - 
reqResPair.Request.orig.(*http.Request), - reqResPair.Request.captureTime, - reqResPair.Response.orig.(*http.Response), - reqResPair.Response.captureTime, - &ConnectionInfo{ - ClientIP: h.tcpID.srcIP, - ClientPort: h.tcpID.srcPort, - ServerIP: h.tcpID.dstIP, - ServerPort: h.tcpID.dstPort, - IsOutgoing: h.isOutgoing, - }, - ) - } - } - - h.parent.Lock() - h.parent.urls = append(h.parent.urls, req.URL.String()) - h.parent.Unlock() - - return nil -} - -func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error { - res, err := http.ReadResponse(b, nil) - h.messageCount++ - var req string - h.parent.Lock() - if len(h.parent.urls) == 0 { - req = fmt.Sprintf("") - } else { - req, h.parent.urls = h.parent.urls[0], h.parent.urls[1:] - } - h.parent.Unlock() - if err != nil { - return err - } - body, err := ioutil.ReadAll(res.Body) - res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind - s := len(body) - if err != nil { - SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s", h.ident, s, err) - } - if h.hexdump { - Debug("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body)) - } - if err := res.Body.Close(); err != nil { - SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s", h.ident, s, err) - } - sym := "," - if res.ContentLength > 0 && res.ContentLength != int64(s) { - sym = "!=" - } - contentType, ok := res.Header["Content-Type"] - if !ok { - contentType = []string{http.DetectContentType(body)} - } - encoding := res.Header["Content-Encoding"] - Debug("HTTP/1 Response: %s %s URL:%s (%d%s%d%s) -> %s", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding) - - ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, h.messageCount) - reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime) - if reqResPair != nil { - statsTracker.incMatchedPairs() - - if h.harWriter != nil { - h.harWriter.WritePair( - reqResPair.Request.orig.(*http.Request), - reqResPair.Request.captureTime, - reqResPair.Response.orig.(*http.Response), - reqResPair.Response.captureTime, - &ConnectionInfo{ - ClientIP: h.tcpID.dstIP, - ClientPort: h.tcpID.dstPort, - ServerIP: h.tcpID.srcIP, - ServerPort: h.tcpID.srcPort, - IsOutgoing: h.isOutgoing, - }, - ) - } - } - - return nil -} diff --git a/tap/passive_tapper.go b/tap/passive_tapper.go index cc0db564c..58695ed70 100644 --- a/tap/passive_tapper.go +++ b/tap/passive_tapper.go @@ -18,7 +18,6 @@ import ( "os/signal" "runtime" "runtime/pprof" - "strconv" "strings" "sync" "time" @@ -31,28 +30,13 @@ import ( "github.com/google/gopacket/layers" // pulls in all layers decoders "github.com/google/gopacket/pcap" "github.com/google/gopacket/reassembly" + "github.com/up9inc/mizu/tap/api" ) -const AppPortsEnvVar = "APP_PORTS" -const maxHTTP2DataLenEnvVar = "HTTP2_DATA_SIZE_LIMIT" -const maxHTTP2DataLenDefault = 1 * 1024 * 1024 // 1MB const cleanPeriod = time.Second * 10 var remoteOnlyOutboundPorts = []int{80, 443} -func parseAppPorts(appPortsList string) []int { - ports := make([]int, 0) - for _, portStr := range strings.Split(appPortsList, ",") { - parsedInt, parseError := strconv.Atoi(portStr) - if parseError != nil { - log.Printf("Provided app port %v is not a valid number!", portStr) - } else { - ports = append(ports, parsedInt) - } - } - return ports -} - var maxcount = flag.Int64("c", -1, "Only grab this many packets, then exit") var decoder = flag.String("decoder", "", "Name of the decoder to use (default: guess from 
capture)") var statsevery = flag.Int("stats", 60, "Output statistics every N seconds") @@ -65,13 +49,6 @@ var allowmissinginit = flag.Bool("allowmissinginit", true, "Support streams with var verbose = flag.Bool("verbose", false, "Be verbose") var debug = flag.Bool("debug", false, "Display debug information") var quiet = flag.Bool("quiet", false, "Be quiet regarding errors") - -// http -var nohttp = flag.Bool("nohttp", false, "Disable HTTP parsing") -var output = flag.String("output", "", "Path to create file for HTTP 200 OK responses") -var writeincomplete = flag.Bool("writeincomplete", false, "Write incomplete response") - -var hexdump = flag.Bool("dump", false, "Dump HTTP request/response as hex") // global var hexdumppkt = flag.Bool("dumppkt", false, "Dump packet as hex") // capture @@ -80,16 +57,10 @@ var fname = flag.String("r", "", "Filename to read from, overrides -i") var snaplen = flag.Int("s", 65536, "Snap length (number of bytes max to read per packet") var tstype = flag.String("timestamp_type", "", "Type of timestamps to use") var promisc = flag.Bool("promisc", true, "Set promiscuous mode") -var anydirection = flag.Bool("anydirection", false, "Capture http requests to other hosts") var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data") var memprofile = flag.String("memprofile", "", "Write memory profile") -// output -var HarOutputDir = flag.String("hardir", "", "Directory in which to store output har files") -var harEntriesPerFile = flag.Int("harentriesperfile", 200, "Number of max number of har entries to store in each file") - -var reqResMatcher = createResponseRequestMatcher() // global var statsTracker = StatsTracker{} // global @@ -119,8 +90,9 @@ var outputLevel int var errorsMap map[string]uint var errorsMapMutex sync.Mutex var nErrors uint -var ownIps []string // global -var hostMode bool // global +var ownIps []string // global +var hostMode bool // global +var extensions []*api.Extension // global /* minOutputLevel: Error will be printed only if outputLevel is above this value * t: key for errorsMap (counting errors) @@ -184,15 +156,15 @@ func (c *Context) GetCaptureInfo() gopacket.CaptureInfo { return c.CaptureInfo } -func StartPassiveTapper(opts *TapOpts) (<-chan *OutputChannelItem, <-chan *OutboundLink) { +func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem, extensionsRef []*api.Extension) { hostMode = opts.HostMode + extensions = extensionsRef - harWriter := NewHarWriter(*HarOutputDir, *harEntriesPerFile) - outboundLinkWriter := NewOutboundLinkWriter() + if GetMemoryProfilingEnabled() { + startMemoryProfiler() + } - go startPassiveTapper(harWriter, outboundLinkWriter) - - return harWriter.OutChan, outboundLinkWriter.OutChan + go startPassiveTapper(outputItems) } func startMemoryProfiler() { @@ -226,7 +198,7 @@ func startMemoryProfiler() { }() } -func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWriter) { +func startPassiveTapper(outputItems chan *api.OutputChannelItem) { log.SetFlags(log.LstdFlags | log.LUTC | log.Lshortfile) defer util.Run()() @@ -248,31 +220,6 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr ownIps = localhostIPs } - appPortsStr := os.Getenv(AppPortsEnvVar) - var appPorts []int - if appPortsStr == "" { - rlog.Info("Received empty/no APP_PORTS env var! 
only listening to http on port 80!") - appPorts = make([]int, 0) - } else { - appPorts = parseAppPorts(appPortsStr) - } - SetFilterPorts(appPorts) - envVal := os.Getenv(maxHTTP2DataLenEnvVar) - if envVal == "" { - rlog.Infof("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to %v", maxHTTP2DataLenDefault) - maxHTTP2DataLen = maxHTTP2DataLenDefault - } else { - if convertedInt, err := strconv.Atoi(envVal); err != nil { - rlog.Infof("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to %v", maxHTTP2DataLenDefault) - maxHTTP2DataLen = maxHTTP2DataLenDefault - } else { - rlog.Infof("Received HTTP2_DATA_SIZE_LIMIT env var: %v", maxHTTP2DataLenDefault) - maxHTTP2DataLen = convertedInt - } - } - - log.Printf("App Ports: %v", gSettings.filterPorts) - var handle *pcap.Handle var err error if *fname != "" { @@ -315,10 +262,6 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr } } - harWriter.Start() - defer harWriter.Stop() - defer outboundLinkWriter.Stop() - var dec gopacket.Decoder var ok bool decoderName := *decoder @@ -335,10 +278,12 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr statsTracker.setStartTime(time.Now()) defragger := ip4defrag.NewIPv4Defragmenter() + var emitter api.Emitter = &api.Emitting{ + OutputChannel: outputItems, + } + streamFactory := &tcpStreamFactory{ - doHTTP: !*nohttp, - harWriter: harWriter, - outbountLinkWriter: outboundLinkWriter, + Emitter: emitter, } streamPool := reassembly.NewStreamPool(streamFactory) assembler := reassembly.NewAssembler(streamPool) @@ -358,7 +303,6 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr cleaner := Cleaner{ assembler: assembler, assemblerMutex: &assemblerMutex, - matcher: &reqResMatcher, cleanPeriod: cleanPeriod, connectionTimeout: staleConnectionTimeout, } @@ -387,10 +331,9 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr memStats := runtime.MemStats{} runtime.ReadMemStats(&memStats) log.Printf( - "mem: %d, goroutines: %d, unmatched messages: %d", + "mem: %d, goroutines: %d", memStats.HeapAlloc, runtime.NumGoroutine(), - reqResMatcher.openMessagesMap.Count(), ) // Since the last print diff --git a/tap/settings.go b/tap/settings.go index 7c8636239..96f12b2db 100644 --- a/tap/settings.go +++ b/tap/settings.go @@ -14,25 +14,13 @@ const ( ) type globalSettings struct { - filterPorts []int filterAuthorities []string } var gSettings = &globalSettings{ - filterPorts: []int{}, filterAuthorities: []string{}, } -func SetFilterPorts(ports []int) { - gSettings.filterPorts = ports -} - -func GetFilterPorts() []int { - ports := make([]int, len(gSettings.filterPorts)) - copy(ports, gSettings.filterPorts) - return ports -} - func SetFilterAuthorities(ipAddresses []string) { gSettings.filterAuthorities = ipAddresses } diff --git a/tap/tcp_reader.go b/tap/tcp_reader.go new file mode 100644 index 000000000..3e276adbb --- /dev/null +++ b/tap/tcp_reader.go @@ -0,0 +1,103 @@ +package tap + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/bradleyfalzon/tlsx" + "github.com/romana/rlog" + "github.com/up9inc/mizu/tap/api" +) + +const checkTLSPacketAmount = 100 + +type tcpReaderDataMsg struct { + bytes []byte + timestamp time.Time +} + +type tcpID struct { + srcIP string + dstIP string + srcPort string + dstPort string +} + +type ConnectionInfo struct { + ClientIP string + ClientPort string + ServerIP string + ServerPort string + IsOutgoing bool +} + +func (tid *tcpID) 
String() string { + return fmt.Sprintf("%s->%s %s->%s", tid.srcIP, tid.dstIP, tid.srcPort, tid.dstPort) +} + +/* tcpReader reads from a channel of bytes of tcp payload, and parses it into requests and responses. + * The payload is written to the channel by a tcpStream object that is dedicated to one tcp connection. + * A tcpReader object is unidirectional: it parses either a client stream or a server stream. + * Implements io.Reader interface (Read) + */ +type tcpReader struct { + ident string + tcpID *api.TcpID + isClient bool + isOutgoing bool + msgQueue chan tcpReaderDataMsg // Channel of captured reassembled tcp payload + data []byte + captureTime time.Time + parent *tcpStream + messageCount uint + packetsSeen uint + outboundLinkWriter *OutboundLinkWriter + extension *api.Extension + emitter api.Emitter + counterPair *api.CounterPair +} + +func (h *tcpReader) Read(p []byte) (int, error) { + var msg tcpReaderDataMsg + + ok := true + for ok && len(h.data) == 0 { + msg, ok = <-h.msgQueue + h.data = msg.bytes + + h.captureTime = msg.timestamp + if len(h.data) > 0 { + h.packetsSeen += 1 + } + if h.packetsSeen < checkTLSPacketAmount && len(msg.bytes) > 5 { // packets with less than 5 bytes cause tlsx to panic + clientHello := tlsx.ClientHello{} + err := clientHello.Unmarshall(msg.bytes) + if err == nil { + rlog.Debugf("Detected TLS client hello with SNI %s\n", clientHello.SNI) + // TODO: Throws `panic: runtime error: invalid memory address or nil pointer dereference` error. + // numericPort, _ := strconv.Atoi(h.tcpID.DstPort) + // h.outboundLinkWriter.WriteOutboundLink(h.tcpID.SrcIP, h.tcpID.DstIP, numericPort, clientHello.SNI, TLSProtocol) + } + } + } + if !ok || len(h.data) == 0 { + return 0, io.EOF + } + + l := copy(p, h.data) + h.data = h.data[l:] + return l, nil +} + +func (h *tcpReader) run(wg *sync.WaitGroup) { + defer wg.Done() + b := bufio.NewReader(h) + err := h.extension.Dissector.Dissect(b, h.isClient, h.tcpID, h.counterPair, h.emitter) + if err != nil { + io.Copy(ioutil.Discard, b) + } +} diff --git a/tap/tcp_stream.go b/tap/tcp_stream.go index b886567bd..55ab74558 100644 --- a/tap/tcp_stream.go +++ b/tap/tcp_stream.go @@ -2,7 +2,6 @@ package tap import ( "encoding/binary" - "encoding/hex" "fmt" "sync" @@ -14,7 +13,7 @@ import ( /* It's a connection (bidirectional) * Implements gopacket.reassembly.Stream interface (Accept, ReassembledSG, ReassemblyComplete) * ReassembledSG gets called when new reassembled data is ready (i.e. bytes in order, no duplicates, complete) - * In our implementation, we pass information from ReassembledSG to the httpReader through a shared channel. + * In our implementation, we pass information from ReassembledSG to the tcpReader through a shared channel.
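+ *
+ * A minimal sketch of that handoff, with the producer and consumer lines as
+ * they appear in ReassembledSG (below) and in tcpReader.Read (tcp_reader.go):
+ *
+ *     // tcp_stream.go (producer): feed reassembled payload to every reader
+ *     reader.msgQueue <- tcpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp}
+ *
+ *     // tcp_reader.go (consumer): block until the next chunk arrives; once
+ *     // the channel is closed, Read returns io.EOF and the dissector stops
+ *     msg, ok = <-h.msgQueue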
*/ type tcpStream struct { tcpstate *reassembly.TCPSimpleFSM @@ -22,10 +21,9 @@ type tcpStream struct { optchecker reassembly.TCPOptionCheck net, transport gopacket.Flow isDNS bool - isHTTP bool - reversed bool - client httpReader - server httpReader + isTapTarget bool + clients []tcpReader + servers []tcpReader urls []string ident string sync.Mutex @@ -141,18 +139,19 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass if len(data) > 2+int(dnsSize) { sg.KeepFrom(2 + int(dnsSize)) } - } else if t.isHTTP { + } else if t.isTapTarget { if length > 0 { - if *hexdump { - Trace("Feeding http with:%s", hex.Dump(data)) - } // This is where we pass the reassembled information onwards - // This channel is read by an httpReader object + // This channel is read by a tcpReader object statsTracker.incReassembledTcpPayloadsCount() - if dir == reassembly.TCPDirClientToServer && !t.reversed { - t.client.msgQueue <- httpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp} + if dir == reassembly.TCPDirClientToServer { + for _, reader := range t.clients { + reader.msgQueue <- tcpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp} + } } else { - t.server.msgQueue <- httpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp} + for _, reader := range t.servers { + reader.msgQueue <- tcpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp} + } } } } @@ -160,9 +159,13 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool { Trace("%s: Connection closed", t.ident) - if t.isHTTP { - close(t.client.msgQueue) - close(t.server.msgQueue) + if t.isTapTarget { + for _, reader := range t.clients { + close(reader.msgQueue) + } + for _, reader := range t.servers { + close(reader.msgQueue) + } } // do not remove the connection to allow last ACK return false diff --git a/tap/tcp_stream_factory.go b/tap/tcp_stream_factory.go index c03600584..900adf842 100644 --- a/tap/tcp_stream_factory.go +++ b/tap/tcp_stream_factory.go @@ -2,9 +2,11 @@ package tap import ( "fmt" - "github.com/romana/rlog" "sync" + "github.com/romana/rlog" + "github.com/up9inc/mizu/tap/api" + "github.com/google/gopacket" "github.com/google/gopacket/layers" // pulls in all layers decoders "github.com/google/gopacket/reassembly" @@ -17,9 +19,8 @@ import ( */ type tcpStreamFactory struct { wg sync.WaitGroup - doHTTP bool - harWriter *HarWriter - outbountLinkWriter *OutboundLinkWriter + outboundLinkWriter *OutboundLinkWriter + Emitter api.Emitter } func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream { @@ -27,62 +28,70 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T fsmOptions := reassembly.TCPSimpleFSMOptions{ SupportMissingEstablishment: *allowmissinginit, } - rlog.Debugf("Current App Ports: %v", gSettings.filterPorts) srcIp := net.Src().String() dstIp := net.Dst().String() - dstPort := int(tcp.DstPort) + srcPort := transport.Src().String() + dstPort := transport.Dst().String() - if factory.shouldNotifyOnOutboundLink(dstIp, dstPort) { - factory.outbountLinkWriter.WriteOutboundLink(net.Src().String(), dstIp, dstPort, "", "") - } - props := factory.getStreamProps(srcIp, dstIp, dstPort) - isHTTP := props.isTapTarget + // if factory.shouldNotifyOnOutboundLink(dstIp, dstPort) { + // factory.outboundLinkWriter.WriteOutboundLink(net.Src().String(), dstIp, dstPort, "", "") + // } + props := factory.getStreamProps(srcIp, srcPort, 
dstIp, dstPort) + isTapTarget := props.isTapTarget stream := &tcpStream{ - net: net, - transport: transport, - isDNS: tcp.SrcPort == 53 || tcp.DstPort == 53, - isHTTP: isHTTP && factory.doHTTP, - reversed: tcp.SrcPort == 80, - tcpstate: reassembly.NewTCPSimpleFSM(fsmOptions), - ident: fmt.Sprintf("%s:%s", net, transport), - optchecker: reassembly.NewTCPOptionCheck(), + net: net, + transport: transport, + isDNS: tcp.SrcPort == 53 || tcp.DstPort == 53, + isTapTarget: isTapTarget, + tcpstate: reassembly.NewTCPSimpleFSM(fsmOptions), + ident: fmt.Sprintf("%s:%s", net, transport), + optchecker: reassembly.NewTCPOptionCheck(), } - if stream.isHTTP { - stream.client = httpReader{ - msgQueue: make(chan httpReaderDataMsg), - ident: fmt.Sprintf("%s %s", net, transport), - tcpID: tcpID{ - srcIP: net.Src().String(), - dstIP: net.Dst().String(), - srcPort: transport.Src().String(), - dstPort: transport.Dst().String(), - }, - hexdump: *hexdump, - parent: stream, - isClient: true, - isOutgoing: props.isOutgoing, - harWriter: factory.harWriter, - outboundLinkWriter: factory.outbountLinkWriter, + if stream.isTapTarget { + for i, extension := range extensions { + counterPair := &api.CounterPair{ + Request: 0, + Response: 0, + } + stream.clients = append(stream.clients, tcpReader{ + msgQueue: make(chan tcpReaderDataMsg), + ident: fmt.Sprintf("%s %s", net, transport), + tcpID: &api.TcpID{ + SrcIP: srcIp, + DstIP: dstIp, + SrcPort: srcPort, + DstPort: dstPort, + }, + parent: stream, + isClient: true, + isOutgoing: props.isOutgoing, + outboundLinkWriter: factory.outboundLinkWriter, + extension: extension, + emitter: factory.Emitter, + counterPair: counterPair, + }) + stream.servers = append(stream.servers, tcpReader{ + msgQueue: make(chan tcpReaderDataMsg), + ident: fmt.Sprintf("%s %s", net, transport), + tcpID: &api.TcpID{ + SrcIP: net.Dst().String(), + DstIP: net.Src().String(), + SrcPort: transport.Dst().String(), + DstPort: transport.Src().String(), + }, + parent: stream, + isClient: false, + isOutgoing: props.isOutgoing, + outboundLinkWriter: factory.outboundLinkWriter, + extension: extension, + emitter: factory.Emitter, + counterPair: counterPair, + }) + factory.wg.Add(2) + // Start each reader's goroutine; they drain their msgQueue channels + go stream.clients[i].run(&factory.wg) + go stream.servers[i].run(&factory.wg) } - stream.server = httpReader{ - msgQueue: make(chan httpReaderDataMsg), - ident: fmt.Sprintf("%s %s", net.Reverse(), transport.Reverse()), - tcpID: tcpID{ - srcIP: net.Dst().String(), - dstIP: net.Src().String(), - srcPort: transport.Dst().String(), - dstPort: transport.Src().String(), - }, - hexdump: *hexdump, - parent: stream, - isOutgoing: props.isOutgoing, - harWriter: factory.harWriter, - outboundLinkWriter: factory.outbountLinkWriter, - } - factory.wg.Add(2) - // Start reading from channels stream.client.bytes and stream.server.bytes - go stream.client.run(&factory.wg) - go stream.server.run(&factory.wg) } return stream } @@ -91,34 +100,24 @@ func (factory *tcpStreamFactory) WaitGoRoutines() { factory.wg.Wait() } -func (factory *tcpStreamFactory) getStreamProps(srcIP string, dstIP string, dstPort int) *streamProps { +func (factory *tcpStreamFactory) getStreamProps(srcIP string, srcPort string, dstIP string, dstPort string) *streamProps { if hostMode { - if inArrayString(gSettings.filterAuthorities, fmt.Sprintf("%s:%d", dstIP, dstPort)) == true { - rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host1 %s:%d", dstIP, dstPort)) + if inArrayString(gSettings.filterAuthorities, fmt.Sprintf("%s:%s", dstIP, 
dstPort)) { + rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host1 %s:%s", dstIP, dstPort)) return &streamProps{isTapTarget: true, isOutgoing: false} - } else if inArrayString(gSettings.filterAuthorities, dstIP) == true { + } else if inArrayString(gSettings.filterAuthorities, dstIP) { rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host2 %s", dstIP)) return &streamProps{isTapTarget: true, isOutgoing: false} - } else if *anydirection && inArrayString(gSettings.filterAuthorities, srcIP) == true { - rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host3 %s", srcIP)) + } else if inArrayString(gSettings.filterAuthorities, fmt.Sprintf("%s:%s", srcIP, srcPort)) { + rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host3 %s:%s", srcIP, srcPort)) + return &streamProps{isTapTarget: true, isOutgoing: true} + } else if inArrayString(gSettings.filterAuthorities, srcIP) { + rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ host4 %s", srcIP)) return &streamProps{isTapTarget: true, isOutgoing: true} } - return &streamProps{isTapTarget: false} + return &streamProps{isTapTarget: false, isOutgoing: false} } else { - isTappedPort := dstPort == 80 || (gSettings.filterPorts != nil && (inArrayInt(gSettings.filterPorts, dstPort))) - if !isTappedPort { - rlog.Debugf("getStreamProps %s", fmt.Sprintf("- notHost1 %d", dstPort)) - return &streamProps{isTapTarget: false, isOutgoing: false} - } - - isOutgoing := !inArrayString(ownIps, dstIP) - - if !*anydirection && isOutgoing { - rlog.Debugf("getStreamProps %s", fmt.Sprintf("- notHost2")) - return &streamProps{isTapTarget: false, isOutgoing: isOutgoing} - } - - rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ notHost3 %s -> %s:%d", srcIP, dstIP, dstPort)) + rlog.Debugf("getStreamProps %s", fmt.Sprintf("+ notHost3 %s -> %s:%s", srcIP, dstIP, dstPort)) return &streamProps{isTapTarget: true} } } diff --git a/ui/.env.example b/ui/.env.example new file mode 100644 index 000000000..cebc16952 --- /dev/null +++ b/ui/.env.example @@ -0,0 +1,2 @@ +REACT_APP_OVERRIDE_WS_URL="ws://localhost:8899/ws" +REACT_APP_OVERRIDE_API_URL="http://localhost:8899/api/" diff --git a/ui/src/App.sass b/ui/src/App.sass index 7ef640155..0bc2e9505 100644 --- a/ui/src/App.sass +++ b/ui/src/App.sass @@ -1,4 +1,4 @@ -@import 'src/variables.module' +@import './variables.module' .mizuApp background-color: $main-background-color diff --git a/ui/src/App.tsx b/ui/src/App.tsx index 3ea758f8d..003fbc8fe 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -38,15 +38,14 @@ const App = () => { } })(); - // eslint-disable-next-line - }, []); + }); const onTLSDetected = (destAddress: string) => { addressesWithTLS.add(destAddress); setAddressesWithTLS(new Set(addressesWithTLS)); if (!userDismissedTLSWarning) { - setShowTLSWarning(true); + setShowTLSWarning(true); } }; diff --git a/ui/src/components/EntriesList.tsx b/ui/src/components/EntriesList.tsx index 31c2634f3..6b1ae69f9 100644 --- a/ui/src/components/EntriesList.tsx +++ b/ui/src/components/EntriesList.tsx @@ -7,11 +7,11 @@ import {StatusType} from "./Filters"; import Api from "../helpers/api"; import down from "./assets/downImg.svg"; -interface HarEntriesListProps { +interface EntriesListProps { entries: any[]; setEntries: (entries: any[]) => void; - focusedEntry: any; - setFocusedEntry: (entry: any) => void; + focusedEntryId: string; + setFocusedEntryId: (id: string) => void; connectionOpen: boolean; noMoreDataTop: boolean; setNoMoreDataTop: (flag: boolean) => void; @@ -32,12 +32,13 @@ enum FetchOperator { const api = new Api(); -export const EntriesList: React.FC 
= ({entries, setEntries, focusedEntry, setFocusedEntry, connectionOpen, noMoreDataTop, setNoMoreDataTop, noMoreDataBottom, setNoMoreDataBottom, methodsFilter, statusFilter, pathFilter, listEntryREF, onScrollEvent, scrollableList}) => { +export const EntriesList: React.FC = ({entries, setEntries, focusedEntryId, setFocusedEntryId, connectionOpen, noMoreDataTop, setNoMoreDataTop, noMoreDataBottom, setNoMoreDataBottom, methodsFilter, statusFilter, pathFilter, listEntryREF, onScrollEvent, scrollableList}) => { const [loadMoreTop, setLoadMoreTop] = useState(false); const [isLoadingTop, setIsLoadingTop] = useState(false); + const scrollableRef = useRef(null); - + useEffect(() => { const list = document.getElementById('list').firstElementChild; list.addEventListener('scroll', (e) => { @@ -111,16 +112,16 @@ export const EntriesList: React.FC = ({entries, setEntries, return <>
-
+
{isLoadingTop &&
spinner
} onScrollEvent(isAtBottom)}> {noMoreDataTop && !connectionOpen &&
No more data available
} {filteredEntries.map(entry => )} + entry={entry} + setFocusedEntryId={setFocusedEntryId} + isSelected={focusedEntryId === entry.id}/>)} {!connectionOpen && !noMoreDataBottom &&
getNewEntries()}>Fetch more entries
} diff --git a/ui/src/components/EntryDetailed.tsx b/ui/src/components/EntryDetailed.tsx new file mode 100644 index 000000000..f7040f35c --- /dev/null +++ b/ui/src/components/EntryDetailed.tsx @@ -0,0 +1,72 @@ +import React from "react"; +import EntryViewer from "./EntryDetailed/EntryViewer"; +import {makeStyles} from "@material-ui/core"; +import Protocol from "./UI/Protocol" +import StatusCode from "./UI/StatusCode"; +import {EndpointPath} from "./UI/EndpointPath"; + +const useStyles = makeStyles(() => ({ + entryTitle: { + display: 'flex', + minHeight: 20, + maxHeight: 46, + alignItems: 'center', + marginBottom: 4, + padding: 2, + paddingBottom: 0 + }, + entrySummary: { + display: 'flex', + minHeight: 36, + maxHeight: 46, + alignItems: 'center', + marginBottom: 4, + padding: 5, + paddingBottom: 0 + } +})); + +interface EntryDetailedProps { + entryData: any +} + +export const formatSize = (n: number) => n > 1000 ? `${Math.round(n / 1000)}KB` : `${n} B`; + +const EntryTitle: React.FC = ({protocol, data}) => { + const classes = useStyles(); + const {response} = JSON.parse(data.entry); + + + return
+ +
+ {response.payload &&
{formatSize(response.payload.bodySize)}
} +
{'rulesMatched' in data ? data.rulesMatched?.length : '0'} Rules Applied
+
+
; +}; + +const EntrySummary: React.FC = ({data}) => { + const classes = useStyles(); + + const {response, request} = JSON.parse(data.entry); + + return
+ {response?.payload && response.payload?.details && "status" in response.payload.details &&
+ +
} +
+ +
+
; +}; + +export const EntryDetailed: React.FC = ({entryData}) => { + return <> + + {entryData.data && } + <> + {entryData.data && } + + +}; diff --git a/ui/src/components/EntryDetailed/EntryDetailed.module.sass b/ui/src/components/EntryDetailed/EntryDetailed.module.sass deleted file mode 100644 index 2af3d6a54..000000000 --- a/ui/src/components/EntryDetailed/EntryDetailed.module.sass +++ /dev/null @@ -1,23 +0,0 @@ -@import "src/variables.module" - -.content - font-family: "Source Sans Pro", Lucida Grande, Tahoma, sans-serif - height: calc(100% - 56px) - overflow-y: auto - width: 100% - - .body - background: $main-background-color - color: $blue-gray - border-radius: 4px - padding: 10px - .bodyHeader - padding: 0 1rem - .endpointURL - font-size: .75rem - display: block - color: $blue-color - text-decoration: none - margin-bottom: .5rem - overflow-wrap: anywhere - padding: 5px 0 \ No newline at end of file diff --git a/ui/src/components/EntryDetailed/EntryDetailed.tsx b/ui/src/components/EntryDetailed/EntryDetailed.tsx deleted file mode 100644 index 0db1d1a6a..000000000 --- a/ui/src/components/EntryDetailed/EntryDetailed.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import React from "react"; -import styles from './EntryDetailed.module.sass'; -import {makeStyles} from "@material-ui/core"; -import {EntryType} from "../EntryListItem/EntryListItem"; -import {RestEntryDetailsTitle} from "./Rest/RestEntryDetailsTitle"; -import {KafkaEntryDetailsTitle} from "./Kafka/KafkaEntryDetailsTitle"; -import {RestEntryDetailsContent} from "./Rest/RestEntryDetailsContent"; -import {KafkaEntryDetailsContent} from "./Kafka/KafkaEntryDetailsContent"; - -const useStyles = makeStyles(() => ({ - entryTitle: { - display: 'flex', - minHeight: 46, - maxHeight: 46, - alignItems: 'center', - marginBottom: 8, - padding: 5, - paddingBottom: 0 - } -})); - -interface EntryDetailedProps { - entryData: any; - classes?: any; - entryType: string; -} - -export const EntryDetailed: React.FC = ({classes, entryData, entryType}) => { - const classesTitle = useStyles(); - - let title, content; - - switch (entryType) { - case EntryType.Rest: - title = ; - content = ; - break; - case EntryType.Kafka: - title = ; - content = ; - break; - default: - title = ; - content = ; - break; - } - - return <> -
{title}
-
-
- {content} -
-
- -}; \ No newline at end of file diff --git a/ui/src/components/EntryDetailed/EntrySections.module.sass b/ui/src/components/EntryDetailed/EntrySections.module.sass index f6d73bc7c..f6f16c96f 100644 --- a/ui/src/components/EntryDetailed/EntrySections.module.sass +++ b/ui/src/components/EntryDetailed/EntrySections.module.sass @@ -1,4 +1,4 @@ -@import 'src/variables.module' +@import '../../variables.module' .title display: flex @@ -31,7 +31,6 @@ margin: .3rem 0 .dataKey - text-transform: capitalize color: $blue-gray margin: 0 0.5rem 0 0 text-align: right diff --git a/ui/src/components/EntryDetailed/EntrySections.tsx b/ui/src/components/EntryDetailed/EntrySections.tsx index c6546f0a4..d4968e046 100644 --- a/ui/src/components/EntryDetailed/EntrySections.tsx +++ b/ui/src/components/EntryDetailed/EntrySections.tsx @@ -1,17 +1,17 @@ import styles from "./EntrySections.module.sass"; import React, {useState} from "react"; -import {SyntaxHighlighter} from "../UI/SyntaxHighlighter"; +import {SyntaxHighlighter} from "../UI/SyntaxHighlighter/index"; import CollapsibleContainer from "../UI/CollapsibleContainer"; import FancyTextDisplay from "../UI/FancyTextDisplay"; import Checkbox from "../UI/Checkbox"; import ProtobufDecoder from "protobuf-decoder"; -interface ViewLineProps { +interface EntryViewLineProps { label: string; value: number | string; } -const ViewLine: React.FC = ({label, value}) => { +const EntryViewLine: React.FC = ({label, value}) => { return (label && value && {label} @@ -26,43 +26,52 @@ const ViewLine: React.FC = ({label, value}) => { ) || null; } -interface SectionCollapsibleTitleProps { - title: string; - isExpanded: boolean; + +interface EntrySectionCollapsibleTitleProps { + title: string, + color: string, + isExpanded: boolean, } -const SectionCollapsibleTitle: React.FC = ({title, isExpanded}) => { +const EntrySectionCollapsibleTitle: React.FC = ({title, color, isExpanded}) => { return
- + {isExpanded ? '-' : '+'} {title}
} -interface SectionContainerProps { - title: string; +interface EntrySectionContainerProps { + title: string, + color: string, } -export const SectionContainer: React.FC = ({title, children}) => { +export const EntrySectionContainer: React.FC = ({title, color, children}) => { const [expanded, setExpanded] = useState(true); return setExpanded(!expanded)} - title={} + title={} > {children} } -interface BodySectionProps { - content: any; - encoding?: string; - contentType?: string; +interface EntryBodySectionProps { + content: any, + color: string, + encoding?: string, + contentType?: string, } -export const BodySection: React.FC = ({content, encoding, contentType}) => { +export const EntryBodySection: React.FC = ({ + color, + content, + encoding, + contentType, +}) => { const MAXIMUM_BYTES_TO_HIGHLIGHT = 10000; // The maximum of chars to highlight in body, in case the response can be megabytes const supportedLanguages = [['html', 'html'], ['json', 'json'], ['application/grpc', 'json']]; // [[indicator, languageToUse],...] const jsonLikeFormats = ['json']; @@ -74,9 +83,9 @@ export const BodySection: React.FC = ({content, encoding, cont const bodyBuf = encoding === 'base64' ? atob(chunk) : chunk; try { - if (jsonLikeFormats.some(format => content?.mimeType?.indexOf(format) > -1)) { + if (jsonLikeFormats.some(format => contentType?.indexOf(format) > -1)) { return JSON.stringify(JSON.parse(bodyBuf), null, 2); - } else if (protobufFormats.some(format => content?.mimeType?.indexOf(format) > -1)) { + } else if (protobufFormats.some(format => contentType?.indexOf(format) > -1)) { // Replace all non printable characters (ASCII) const protobufDecoder = new ProtobufDecoder(bodyBuf, true); return JSON.stringify(protobufDecoder.decode().toSimple(), null, 2); @@ -88,18 +97,18 @@ export const BodySection: React.FC = ({content, encoding, cont } const getLanguage = (mimetype) => { - const chunk = content.text?.slice(0, 100); + const chunk = content?.slice(0, 100); if (chunk.indexOf('html') > 0 || chunk.indexOf('HTML') > 0) return supportedLanguages[0][1]; const language = supportedLanguages.find(el => (mimetype + contentType).indexOf(el[0]) > -1); return language ? language[1] : 'default'; } return - {content && content.text?.length > 0 && + {content && content?.length > 0 && - - + +
@@ -112,50 +121,54 @@ export const BodySection: React.FC = ({content, encoding, cont -
} + }
} -interface TableSectionProps { +interface EntrySectionProps { title: string, + color: string, arrayToIterate: any[], } -export const TableSection: React.FC = ({title, arrayToIterate}) => { +export const EntryTableSection: React.FC = ({title, color, arrayToIterate}) => { return { arrayToIterate && arrayToIterate.length > 0 ? - + - {arrayToIterate.map(({name, value}, index) => )}
-
: + : }
} -interface HAREntryPolicySectionProps { + + +interface EntryPolicySectionProps { service: string, title: string, + color: string, response: any, latency?: number, arrayToIterate: any[], } -interface HAREntryPolicySectionCollapsibleTitleProps { +interface EntryPolicySectionCollapsibleTitleProps { label: string; matched: string; isExpanded: boolean; } -const HAREntryPolicySectionCollapsibleTitle: React.FC = ({label, matched, isExpanded}) => { +const EntryPolicySectionCollapsibleTitle: React.FC = ({label, matched, isExpanded}) => { return
{isExpanded ? '-' : '+'} @@ -169,45 +182,76 @@ const HAREntryPolicySectionCollapsibleTitle: React.FC } -interface HAREntryPolicySectionContainerProps { +interface EntryPolicySectionContainerProps { label: string; matched: string; children?: any; } -export const HAREntryPolicySectionContainer: React.FC = ({label, matched, children}) => { +export const EntryPolicySectionContainer: React.FC = ({label, matched, children}) => { const [expanded, setExpanded] = useState(false); return setExpanded(!expanded)} - title={} + title={} > {children} } -export const HAREntryTablePolicySection: React.FC = ({service, title, response, latency, arrayToIterate}) => { +export const EntryTablePolicySection: React.FC = ({service, title, color, response, latency, arrayToIterate}) => { return - {arrayToIterate && arrayToIterate.length > 0 ? <> - - - - {arrayToIterate.map(({rule, matched}, index) => { - return (= latency : true)? "Success" : "Failure"}> - <> - {rule.Key && } - {rule.Latency > 0 ? : ''} - {rule.Method && } - {rule.Path && } - {rule.Service && } - {rule.Type && } - {rule.Value && } - - )})} - -
Key:{rule.Key}
Latency:{rule.Latency}
Method: {rule.Method}
Path: {rule.Path}
Service: {service}
Type: {rule.Type}
Value: {rule.Value}
-
- : No rules could be applied to this request.} + { + arrayToIterate && arrayToIterate.length > 0 ? + <> + + + + {arrayToIterate.map(({rule, matched}, index) => { + return ( + = latency : true)? "Success" : "Failure"}> + { + <> + { + rule.Key && + + } + { + rule.Latency && + + } + { + rule.Method && + + } + { + rule.Path && + + } + { + rule.Service && + + } + { + rule.Type && + + } + { + rule.Value && + + } + + } + + ) + } + ) + } + +
Key:{rule.Key}
Latency: {rule.Latency}
Method: {rule.Method}
Path: {rule.Path}
Service: {service}
Type: {rule.Type}
Value: {rule.Value}
+
+ : + }
-} \ No newline at end of file +} diff --git a/ui/src/components/EntryDetailed/EntryViewer.module.sass b/ui/src/components/EntryDetailed/EntryViewer.module.sass new file mode 100644 index 000000000..fd8c882da --- /dev/null +++ b/ui/src/components/EntryDetailed/EntryViewer.module.sass @@ -0,0 +1,60 @@ +@import "../../variables.module" + +.Entry + font-family: "Source Sans Pro", Lucida Grande, Tahoma, sans-serif + height: 100% + width: 100% + + h3, + h4 + font-family: "Source Sans Pro", Lucida Grande, Tahoma, sans-serif + + .header + background-color: rgb(55, 65, 111) + padding: 0.5rem .75rem .65rem .75rem + border-top-left-radius: 0.25rem + border-top-right-radius: 0.25rem + display: flex + font-size: .75rem + align-items: center + .description + min-width: 25rem + display: flex + align-items: center + justify-content: space-between + .method + padding: 0 .25rem + font-size: 0.75rem + font-weight: bold + border-radius: 0.25rem + border: 0.0625rem solid rgba(255, 255, 255, 0.16) + margin-right: .5rem + > span + margin-left: .5rem + .timing + border-left: 1px solid #627ef7 + margin-left: .3rem + padding-left: .3rem + + .headerClickable + cursor: pointer + &:hover + background: lighten(rgb(55, 65, 111), 10%) + border-top-left-radius: 0 + border-top-right-radius: 0 + + .body + background: $main-background-color + color: $blue-gray + border-radius: 4px + padding: 10px + .bodyHeader + padding: 0 1rem + .endpointURL + font-size: .75rem + display: block + color: $blue-color + text-decoration: none + margin-bottom: .5rem + overflow-wrap: anywhere + padding: 5px 0 diff --git a/ui/src/components/EntryDetailed/EntryViewer.tsx b/ui/src/components/EntryDetailed/EntryViewer.tsx new file mode 100644 index 000000000..aa1daae6f --- /dev/null +++ b/ui/src/components/EntryDetailed/EntryViewer.tsx @@ -0,0 +1,87 @@ +import React, {useState} from 'react'; +import styles from './EntryViewer.module.sass'; +import Tabs from "../UI/Tabs"; +import {EntryTableSection, EntryBodySection, EntryTablePolicySection} from "./EntrySections"; + +enum SectionTypes { + SectionTable = "table", + SectionBody = "body", +} + +const SectionsRepresentation: React.FC = ({data, color}) => { + const sections = [] + + if (data) { + for (const [i, row] of data.entries()) { + switch (row.type) { + case SectionTypes.SectionTable: + sections.push( + + ) + break; + case SectionTypes.SectionBody: + sections.push( + + ) + break; + default: + break; + } + } + } + + return <>{sections}; +} + +const AutoRepresentation: React.FC = ({representation, color}) => { + const rulesMatched = [] + const TABS = [ + { + tab: 'request' + }, + { + tab: 'response', + }, + { + tab: 'Rules', + }, + ]; + const [currentTab, setCurrentTab] = useState(TABS[0].tab); + + // Don't fail even if `representation` is an empty string + if (!representation) { + return <>; + } + + const {request, response} = JSON.parse(representation); + + return
+ {
+
+ + {request?.url && {request.payload.url}} +
+ {currentTab === TABS[0].tab && + + } + {currentTab === TABS[1].tab && + + } + {currentTab === TABS[2].tab && + {// FIXME: Fix here + } + } +
} +
; +} + +interface Props { + representation: any; + color: string, +} + +const EntryViewer: React.FC = ({representation, color}) => { + return +}; + +export default EntryViewer; diff --git a/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsContent.tsx b/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsContent.tsx deleted file mode 100644 index 7fe97954c..000000000 --- a/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsContent.tsx +++ /dev/null @@ -1,6 +0,0 @@ -import React from "react"; - -export const KafkaEntryDetailsContent: React.FC = ({entryData}) => { - - return <>; -} diff --git a/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsTitle.tsx b/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsTitle.tsx deleted file mode 100644 index 4d1aeee2f..000000000 --- a/ui/src/components/EntryDetailed/Kafka/KafkaEntryDetailsTitle.tsx +++ /dev/null @@ -1,6 +0,0 @@ -import React from "react"; - -export const KafkaEntryDetailsTitle: React.FC = ({entryData}) => { - - return <> -} \ No newline at end of file diff --git a/ui/src/components/EntryDetailed/Rest/RestEntryDetailsContent.tsx b/ui/src/components/EntryDetailed/Rest/RestEntryDetailsContent.tsx deleted file mode 100644 index fe00f15a0..000000000 --- a/ui/src/components/EntryDetailed/Rest/RestEntryDetailsContent.tsx +++ /dev/null @@ -1,43 +0,0 @@ -import React, {useState} from "react"; -import styles from "../EntryDetailed.module.sass"; -import Tabs from "../../UI/Tabs"; -import {BodySection, HAREntryTablePolicySection, TableSection} from "../EntrySections"; -import {singleEntryToHAR} from "../../../helpers/utils"; - -const MIME_TYPE_KEY = 'mimeType'; - -export const RestEntryDetailsContent: React.FC = ({entryData}) => { - - const har = singleEntryToHAR(entryData); - const {request, response, timings: {receive}} = har.log.entries[0].entry; - const rulesMatched = har.log.entries[0].rulesMatched - const TABS = [ - {tab: 'request'}, - {tab: 'response'}, - {tab: 'Rules'}, - ]; - - const [currentTab, setCurrentTab] = useState(TABS[0].tab); - - return <> -
- - {request?.url && {request.url}} -
- {currentTab === TABS[0].tab && <> - - - {request?.postData && } - - - } - {currentTab === TABS[1].tab && <> - - - - } - {currentTab === TABS[2].tab && <> - - } - ; -} diff --git a/ui/src/components/EntryDetailed/Rest/RestEntryDetailsTitle.tsx b/ui/src/components/EntryDetailed/Rest/RestEntryDetailsTitle.tsx deleted file mode 100644 index 3d9925505..000000000 --- a/ui/src/components/EntryDetailed/Rest/RestEntryDetailsTitle.tsx +++ /dev/null @@ -1,26 +0,0 @@ -import React from "react"; -import {singleEntryToHAR} from "../../../helpers/utils"; -import StatusCode from "../../UI/StatusCode"; -import {EndpointPath} from "../../UI/EndpointPath"; - -const formatSize = (n: number) => n > 1000 ? `${Math.round(n / 1000)}KB` : `${n} B`; - -export const RestEntryDetailsTitle: React.FC = ({entryData}) => { - - const har = singleEntryToHAR(entryData); - const {log: {entries}} = har; - const {response, request, timings: {receive}} = entries[0].entry; - const {status, statusText, bodySize} = response; - - return har && <> - {status &&
- -
} -
- -
-
{formatSize(bodySize)}
-
{status} {statusText}
-
{Math.round(receive)}ms
- -} \ No newline at end of file diff --git a/ui/src/components/EntryListItem/EntryListItem.module.sass b/ui/src/components/EntryListItem/EntryListItem.module.sass index 23a5421df..0ee428182 100644 --- a/ui/src/components/EntryListItem/EntryListItem.module.sass +++ b/ui/src/components/EntryListItem/EntryListItem.module.sass @@ -1,4 +1,4 @@ -@import 'src/variables.module' +@import '../../variables.module' .row display: flex @@ -19,45 +19,20 @@ .rowSelected border: 1px $blue-color solid - border-left: 5px $blue-color solid +// border-left: 5px $blue-color solid margin-left: 10px margin-right: 3px .ruleSuccessRow - background: #E8FFF1 - -.ruleSuccessRowSelected - border: 1px #6FCF97 solid - border-left: 5px #6FCF97 solid - margin-left: 10px - margin-right: 3px + border: 1px $success-color solid +// border-left: 5px $success-color solid .ruleFailureRow background: #FFE9EF .ruleFailureRowSelected border: 1px $failure-color solid - border-left: 5px $failure-color solid - margin-left: 10px - margin-right: 3px - -.ruleNumberTextFailure - color: #DB2156 - font-family: Source Sans Pro - font-style: normal - font-weight: 600 - font-size: 12px - line-height: 15px - padding-right: 12px - -.ruleNumberTextSuccess - color: #219653 - font-family: Source Sans Pro - font-style: normal - font-weight: 600 - font-size: 12px - line-height: 15px - padding-right: 12px +// border-left: 5px $failure-color solid .service text-overflow: ellipsis @@ -73,11 +48,10 @@ .timestamp font-size: 12px color: $secondary-font-color + padding-left: 12px flex-shrink: 0 width: 145px text-align: left - border-left: 1px solid $data-background-color - padding: 6px 0 6px 12px .endpointServiceContainer display: flex @@ -89,12 +63,11 @@ .directionContainer display: flex - padding: 4px 12px 4px 4px + border-right: 1px solid $data-background-color + padding: 4px + padding-right: 12px -.icon - height: 14px - width: 50px - padding: 5px - background-color: white - border-radius: 15px - box-shadow: 1px 1px 9px -4px black \ No newline at end of file +.port + font-size: 12px + color: $secondary-font-color + margin: 5px diff --git a/ui/src/components/EntryListItem/EntryListItem.tsx b/ui/src/components/EntryListItem/EntryListItem.tsx index 4a8a8a017..529f65895 100644 --- a/ui/src/components/EntryListItem/EntryListItem.tsx +++ b/ui/src/components/EntryListItem/EntryListItem.tsx @@ -1,16 +1,31 @@ import React from "react"; import styles from './EntryListItem.module.sass'; -import restIcon from '../assets/restIcon.svg'; -import kafkaIcon from '../assets/kafkaIcon.svg'; -import {RestEntry, RestEntryContent} from "./RestEntryContent"; -import {KafkaEntry, KafkaEntryContent} from "./KafkaEntryContent"; +import StatusCode, {getClassification, StatusCodeClassification} from "../UI/StatusCode"; +import Protocol, {ProtocolInterface} from "../UI/Protocol" +import {EndpointPath} from "../UI/EndpointPath"; +import ingoingIconSuccess from "../assets/ingoing-traffic-success.svg" +import ingoingIconFailure from "../assets/ingoing-traffic-failure.svg" +import ingoingIconNeutral from "../assets/ingoing-traffic-neutral.svg" +import outgoingIconSuccess from "../assets/outgoing-traffic-success.svg" +import outgoingIconFailure from "../assets/outgoing-traffic-failure.svg" +import outgoingIconNeutral from "../assets/outgoing-traffic-neutral.svg" -export interface BaseEntry { - type: string; +interface Entry { + protocol: ProtocolInterface, + method?: string, + summary: string, + service: string, + id: string, + status_code?: number; + url?: string; timestamp: Date; - id: 
string; - rules: Rules; + source_ip: string, + source_port: string, + destination_ip: string, + destination_port: string, + isOutgoing?: boolean; latency: number; + rules: Rules; } interface Rules { @@ -20,66 +35,100 @@ interface Rules { } interface EntryProps { - entry: RestEntry | KafkaEntry | any; - setFocusedEntry: (entry: RestEntry | KafkaEntry) => void; + entry: Entry; + setFocusedEntryId: (id: string) => void; isSelected?: boolean; } -export enum EntryType { - Rest = "rest", - Kafka = "kafka" -} - -export const EntryItem: React.FC = ({entry, setFocusedEntry, isSelected}) => { - - let additionalRulesProperties = ""; +export const EntryItem: React.FC = ({entry, setFocusedEntryId, isSelected}) => { + const classification = getClassification(entry.status_code) + let ingoingIcon; + let outgoingIcon; + switch(classification) { + case StatusCodeClassification.SUCCESS: { + ingoingIcon = ingoingIconSuccess; + outgoingIcon = outgoingIconSuccess; + break; + } + case StatusCodeClassification.FAILURE: { + ingoingIcon = ingoingIconFailure; + outgoingIcon = outgoingIconFailure; + break; + } + case StatusCodeClassification.NEUTRAL: { + ingoingIcon = ingoingIconNeutral; + outgoingIcon = outgoingIconNeutral; + break; + } + } + // let additionalRulesProperties = ""; + // let ruleSuccess: boolean; let rule = 'latency' in entry.rules if (rule) { if (entry.rules.latency !== -1) { if (entry.rules.latency >= entry.latency) { - additionalRulesProperties = styles.ruleSuccessRow + // additionalRulesProperties = styles.ruleSuccessRow + // ruleSuccess = true } else { - additionalRulesProperties = styles.ruleFailureRow + // additionalRulesProperties = styles.ruleFailureRow + // ruleSuccess = false } if (isSelected) { - additionalRulesProperties += ` ${entry.rules.latency >= entry.latency ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}` + // additionalRulesProperties += ` ${entry.rules.latency >= entry.latency ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}` } } else { if (entry.rules.status) { - additionalRulesProperties = styles.ruleSuccessRow + // additionalRulesProperties = styles.ruleSuccessRow + // ruleSuccess = true } else { - additionalRulesProperties = styles.ruleFailureRow + // additionalRulesProperties = styles.ruleFailureRow + // ruleSuccess = false } if (isSelected) { - additionalRulesProperties += ` ${entry.rules.status ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}` + // additionalRulesProperties += ` ${entry.rules.status ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}` } } } - - let icon, content; - - switch (entry.type) { - case EntryType.Rest: - content = ; - icon = restIcon; - break; - case EntryType.Kafka: - content = ; - icon = kafkaIcon; - break; - default: - content = ; - icon = restIcon; - break; + let backgroundColor = ""; + if ('latency' in entry.rules) { + if (entry.rules.latency !== -1) { + backgroundColor = entry.rules.latency >= entry.latency ? styles.ruleSuccessRow : styles.ruleFailureRow + } else { + backgroundColor = entry.rules.status ? styles.ruleSuccessRow : styles.ruleFailureRow + } } - return <> -
setFocusedEntry(entry)}> - {icon &&
{icon}
} - {content} -
{new Date(+entry.timestamp)?.toLocaleString()}
+
setFocusedEntryId(entry.id)} + style={{border: isSelected ? `1px ${entry.protocol.background_color} solid` : "1px transparent solid"}} + > + + {((entry.protocol.name === "http" && "status_code" in entry) || entry.status_code !== 0) &&
+ +
} +
+ +
+ {entry.service} +
+
+
+ {entry.source_port} + {entry.isOutgoing ? + Ingoing traffic + : + Outgoing traffic + } + {entry.destination_port} +
+
+ + {new Date(+entry.timestamp)?.toLocaleString()} + +
}; - diff --git a/ui/src/components/EntryListItem/KafkaEntryContent.tsx b/ui/src/components/EntryListItem/KafkaEntryContent.tsx deleted file mode 100644 index b461aef35..000000000 --- a/ui/src/components/EntryListItem/KafkaEntryContent.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import {BaseEntry} from "./EntryListItem"; -import React from "react"; - -export interface KafkaEntry extends BaseEntry{ -} - -interface KafkaEntryContentProps { - entry: KafkaEntry; -} - -export const KafkaEntryContent: React.FC = ({entry}) => { - - return <> - -} \ No newline at end of file diff --git a/ui/src/components/EntryListItem/RestEntryContent.tsx b/ui/src/components/EntryListItem/RestEntryContent.tsx deleted file mode 100644 index fb51bff87..000000000 --- a/ui/src/components/EntryListItem/RestEntryContent.tsx +++ /dev/null @@ -1,82 +0,0 @@ -import React from "react"; -import StatusCode, {getClassification, StatusCodeClassification} from "../UI/StatusCode"; -import ingoingIconSuccess from "../assets/ingoing-traffic-success.svg"; -import outgoingIconSuccess from "../assets/outgoing-traffic-success.svg"; -import ingoingIconFailure from "../assets/ingoing-traffic-failure.svg"; -import outgoingIconFailure from "../assets/outgoing-traffic-failure.svg"; -import ingoingIconNeutral from "../assets/ingoing-traffic-neutral.svg"; -import outgoingIconNeutral from "../assets/outgoing-traffic-neutral.svg"; -import styles from "./EntryListItem.module.sass"; -import {EndpointPath} from "../UI/EndpointPath"; -import {BaseEntry} from "./EntryListItem"; - -export interface RestEntry extends BaseEntry{ - method?: string, - path: string, - service: string, - statusCode?: number; - url?: string; - isCurrentRevision?: boolean; - isOutgoing?: boolean; -} - -interface RestEntryContentProps { - entry: RestEntry; -} - -export const RestEntryContent: React.FC = ({entry}) => { - const classification = getClassification(entry.statusCode) - const numberOfRules = entry.rules.numberOfRules - - let ingoingIcon; - let outgoingIcon; - switch (classification) { - case StatusCodeClassification.SUCCESS: { - ingoingIcon = ingoingIconSuccess; - outgoingIcon = outgoingIconSuccess; - break; - } - case StatusCodeClassification.FAILURE: { - ingoingIcon = ingoingIconFailure; - outgoingIcon = outgoingIconFailure; - break; - } - case StatusCodeClassification.NEUTRAL: { - ingoingIcon = ingoingIconNeutral; - outgoingIcon = outgoingIconNeutral; - break; - } - } - - let ruleSuccess: boolean; - let rule = 'latency' in entry.rules - if (rule) { - if (entry.rules.latency !== -1) { - ruleSuccess = entry.rules.latency >= entry.latency; - } else { - ruleSuccess = entry.rules.status; - } - } - - return <> - {entry.statusCode &&
- -
} -
- -
- {entry.service} -
-
- {rule &&
- {`Rules (${numberOfRules})`} -
} -
- {entry.isOutgoing ? - outgoing traffic - : - ingoing traffic - } -
- -} \ No newline at end of file diff --git a/ui/src/components/Filters.tsx b/ui/src/components/Filters.tsx index 39430a2cb..c7188c2a5 100644 --- a/ui/src/components/Filters.tsx +++ b/ui/src/components/Filters.tsx @@ -4,7 +4,7 @@ import {FilterSelect} from "./UI/FilterSelect"; import {TextField} from "@material-ui/core"; import {ALL_KEY} from "./UI/Select"; -interface HarFiltersProps { +interface FiltersProps { methodsFilter: Array; setMethodsFilter: (methods: Array) => void; statusFilter: Array; @@ -13,7 +13,7 @@ interface HarFiltersProps { setPathFilter: (val: string) => void; } -export const Filters: React.FC = ({methodsFilter, setMethodsFilter, statusFilter, setStatusFilter, pathFilter, setPathFilter}) => { +export const Filters: React.FC = ({methodsFilter, setMethodsFilter, statusFilter, setStatusFilter, pathFilter, setPathFilter}) => { return
diff --git a/ui/src/components/TrafficPage.tsx b/ui/src/components/TrafficPage.tsx index 8ecfd46f0..18901e659 100644 --- a/ui/src/components/TrafficPage.tsx +++ b/ui/src/components/TrafficPage.tsx @@ -4,7 +4,7 @@ import {EntriesList} from "./EntriesList"; import {makeStyles} from "@material-ui/core"; import "./style/TrafficPage.sass"; import styles from './style/EntriesList.module.sass'; -import {EntryDetailed} from "./EntryDetailed/EntryDetailed"; +import {EntryDetailed} from "./EntryDetailed"; import playIcon from './assets/run.svg'; import pauseIcon from './assets/pause.svg'; import variables from '../variables.module.scss'; @@ -18,15 +18,16 @@ const useLayoutStyles = makeStyles(() => ({ padding: "12px 24px", borderRadius: 4, marginTop: 15, - background: variables.headerBackgoundColor + background: variables.headerBackgoundColor, }, - harViewer: { + viewer: { display: 'flex', overflowY: 'auto', height: "calc(100% - 70px)", padding: 5, - paddingBottom: 0 + paddingBottom: 0, + overflow: "auto", } })); @@ -36,19 +37,19 @@ enum ConnectionStatus { Paused } -interface HarPageProps { +interface TrafficPageProps { setAnalyzeStatus: (status: any) => void; onTLSDetected: (destAddress: string) => void; } const api = new Api(); -export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDetected}) => { +export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDetected}) => { const classes = useLayoutStyles(); const [entries, setEntries] = useState([] as any); - const [focusedEntry, setFocusedEntry] = useState(null); + const [focusedEntryId, setFocusedEntryId] = useState(null); const [selectedEntryData, setSelectedEntryData] = useState(null); const [connection, setConnection] = useState(ConnectionStatus.Closed); const [noMoreDataTop, setNoMoreDataTop] = useState(false); @@ -83,7 +84,7 @@ export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDete setNoMoreDataBottom(false) return; } - if (!focusedEntry) setFocusedEntry(entry) + if (!focusedEntryId) setFocusedEntryId(entry.id) let newEntries = [...entries]; if (entries.length === 1000) { newEntries = newEntries.splice(1); @@ -128,17 +129,17 @@ export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDete useEffect(() => { - if (!focusedEntry) return; + if (!focusedEntryId) return; setSelectedEntryData(null); (async () => { try { - const entryData = await api.getEntry(focusedEntry.id); + const entryData = await api.getEntry(focusedEntryId); setSelectedEntryData(entryData); } catch (error) { console.error(error); } })() - }, [focusedEntry]) + }, [focusedEntryId]) const toggleConnection = () => { setConnection(connection === ConnectionStatus.Connected ? ConnectionStatus.Paused : ConnectionStatus.Connected); @@ -170,16 +171,16 @@ export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDete const onScrollEvent = (isAtBottom) => { isAtBottom ? setDisableScrollList(false) : setDisableScrollList(true) } - + const isScrollable = (element) => { return element.scrollHeight > element.clientHeight; }; return ( -
-
+
+
pause + src={connection === ConnectionStatus.Connected ? pauseIcon : playIcon} onClick={toggleConnection}/>
{getConnectionTitle()}
@@ -187,36 +188,36 @@ export const TrafficPage: React.FC = ({setAnalyzeStatus, onTLSDete
- {entries.length > 0 &&
-
+ {entries.length > 0 &&
+
- {selectedEntryData && } + {selectedEntryData && }
} {tappingStatus?.pods != null && } diff --git a/ui/src/components/UI/EndpointPath.tsx b/ui/src/components/UI/EndpointPath.tsx index 4209d6e19..2561aab44 100644 --- a/ui/src/components/UI/EndpointPath.tsx +++ b/ui/src/components/UI/EndpointPath.tsx @@ -9,7 +9,7 @@ interface EndpointPathProps { export const EndpointPath: React.FC = ({method, path}) => { return
- {method && {method}} - {path &&
{path}
} + {method && {method}} + {path &&
{path}
}
-}; \ No newline at end of file +}; diff --git a/ui/src/components/UI/FilterSelect.tsx b/ui/src/components/UI/FilterSelect.tsx index a2247b6d8..bf6764ad0 100644 --- a/ui/src/components/UI/FilterSelect.tsx +++ b/ui/src/components/UI/FilterSelect.tsx @@ -3,7 +3,7 @@ import { MenuItem } from '@material-ui/core'; import style from './style/FilterSelect.module.sass'; import { Select, SelectProps } from "./Select"; -interface HARFilterSelectProps extends SelectProps { +interface FilterSelectProps extends SelectProps { items: string[]; value: string | string[]; onChange: (string) => void; @@ -12,7 +12,7 @@ interface HARFilterSelectProps extends SelectProps { transformDisplay?: (string) => string; } -export const FilterSelect: React.FC = ({items, value, onChange, label, allowMultiple= false, transformDisplay}) => { +export const FilterSelect: React.FC = ({items, value, onChange, label, allowMultiple= false, transformDisplay}) => { return