mirror of
https://github.com/kubeshark/kubeshark.git
synced 2025-09-05 04:23:09 +00:00
Refactor Mizu, define an extension API and add new protocols: AMQP, Kafka (#224)
* Separate HTTP related code into `extensions/http` as a Go plugin * Move `extensions` folder into `tap` folder * Move HTTP files into `tap/extensions/lib` for now * Replace `orcaman/concurrent-map` with `sync.Map` * Remove `grpc_assembler.go` * Remove `github.com/up9inc/mizu/tap/extensions/http/lib` * Add a build script to automatically build extensions from a known path and load them * Start to define the extension API * Implement the `run()` function for the TCP stream * Add support of defining multiple ports to the extension API * Set the extension name inside the extension * Declare the `Dissect` function in the extension API * Dissect HTTP request from inside the HTTP extension * Make the distinction of outbound and inbound ports * Dissect HTTP response from inside the HTTP extension * Bring back the HTTP request-response pair matcher * Return a `*api.RequestResponsePair` from the dissection * Bring back the gRPC-HTTP/2 parser * Fix the issues in `handleHTTP1ClientStream` and `handleHTTP1ServerStream` * Call a function pointer to emit dissected data back to the `tap` package * roee changes - trying to fix agent to work with the "api" object) - ***still not working*** * small mistake in the conflicts * Fix the issues that are introduced by the merge conflict * Add `Emitter` interface to the API and send `OutputChannelItem`(s) to `OutputChannel` * Fix the `HTTP1` handlers * Set `ConnectionInfo` in HTTP handlers * Fix the `Dockerfile` to build the extensions * remove some unwanted code * no message * Re-enable `getStreamProps` function * Migrate back from `gopacket/tcpassembly` to `gopacket/reassembly` * Introduce `HTTPPayload` struct and `HTTPPayloader` interface to `MarshalJSON()` all the data structures that are returned by the HTTP protocol * Read `socketHarOutChannel` instead of `filteredHarChannel` * Connect `OutputChannelItem` to the last WebSocket means that finally the web UI started to work again * Add `.env.example` to React app * Marshal and 
unmarshal `*http.Request`, `*http.Response` pairs * Move `loadExtensions` into `main.go` and map extensions into `extensionsMap` * Add `Summarize()` method to the `Dissector` interface * Add `Analyze` method to the `Dissector` interface and `MizuEntry` to the extension API * Add `Protocol` struct and make it effect the UI * Refactor `BaseEntryDetails` struct and display the source and destination ports in the UI * Display the protocol name inside the details layout * Add `Represent` method to the `Dissector` interface and manipulate the UI through this method * Make the protocol color affect the details layout color and write protocol abbreviation vertically * Remove everything HTTP related from the `tap` package and make the extension system fully functional * Fix the TypeScript warnings * Bring in the files related AMQP into `amqp` directory * Add `--nodefrag` flag to the tapper and bring in the main AMQP code * Implement the AMQP `BasicPublish` and fix some issues in the UI when the response payload is missing * Implement `representBasicPublish` method * Fix several minor issues * Implement the AMQP `BasicDeliver` * Implement the AMQP `QueueDeclare` * Implement the AMQP `ExchangeDeclare` * Implement the AMQP `ConnectionStart` * Implement the AMQP `ConnectionClose` * Implement the AMQP `QueueBind` * Implement the AMQP `BasicConsume` * Fix an issue in `ConnectionStart` * Fix a linter error * Bring in the files related Kafka into `kafka` directory * Fix the build errors in Kafka Go files * Implement `Dissect` method of Kafka and adapt request-response pair matcher to asynchronous client-server stream * Do the "Is reversed?" 
checked inside `getStreamProps` and fix an issue in Kafka `Dissect` method * Implement `Analyze`, `Summarize` methods of Kafka * Implement the representations for Kafka `Metadata`, `RequestHeader` and `ResponseHeader` * Refactor the AMQP and Kafka implementations to create the summary string only inside the `Analyze` method * Implement the representations for Kafka `ApiVersions` * Implement the representations for Kafka `Produce` * Implement the representations for Kafka `Fetch` * Implement the representations for Kafka `ListOffsets`, `CreateTopics` and `DeleteTopics` * Fix the encoding of AMQP `BasicPublish` and `BasicDeliver` body * Remove the unnecessary logging * Remove more logging * Introduce `Version` field to `Protocol` struct for dynamically switching the HTTP protocol to HTTP/2 * Fix the issues in analysis and representation of HTTP/2 (gRPC) protocol * Fix the issues in summary section of details layout for HTTP/2 (gRPC) protocol * Fix the read errors that freezes the sniffer in HTTP and Kafka * Fix the issues in HTTP POST data * Fix one more issue in HTTP POST data * Fix an infinite loop in Kafka * Fix another freezing issue in Kafka * Revert "UI Infra - Support multiple entry types + refactoring (#211)" This reverts commitf74a52d4dc
. * Fix more issues that are introduced by the merge * Fix the status code in the summary section * adding the cleaner again (why we removed it?). add TODO: on the extension loop . * fix dockerfile (remove deleting .env file) - it is found in dockerignore and fails to build if the file not exists * fix GetEntrties ("/entries" endpoint) - working with "tapApi.BaseEntryDetail" (moved from shared) * Fix an issue in the UI summary section * Refactor the protocol payload structs * Fix a log message in the passive tapper * Adapt `APP_PORTS` environment variable to the new extension system and change its format to `APP_PORTS='{"http": ["8001"]}' ` * Revert "fix dockerfile (remove deleting .env file) - it is found in dockerignore and fails to build if the file not exists" This reverts commit4f514ae1f4
. * Bring in the necessary changes from f74a52d4dc
* Open the API server URL in the web browser as soon as Mizu is ready * Make the TCP reader consists of a single Go routine (instead of two) and try to dissect in both client and server mode by rewinding * Swap `TcpID` without overwriting it * Sort extension by priority * Try to dissect with looping through all the extensions * fix getStreamProps function. (it should be passed from CLI as it was before). * Turn TCP reader back into two Goroutines (client and server) * typo * Learn `isClient` from the TCP stream * Set `viewer` style `overflow: "auto"` * Fix the memory leaks in AMQP and Kafka dissectors * Revert some of the changes inbe7c65eb6d
* Remove `allExtensionPorts` since it's no longer needed * Remove `APP_PORTS` since it's no longer needed * Fix all of the minor issues in the React code * Check Kafka header size and fail-fast * Break the dissectors loop upon a successful dissection * Don't break the dissector loop. Protocols might collide * Improve the HTTP request-response counter (still not perfect) * Make the HTTP request-response counter perfect * Revert "Revert some of the changes in be7c65eb6d3fb657a059707da3ca559937e59739" This reverts commit08e7d786d8
. * Bring back `filterItems` and `isHealthCheckByUserAgent` functions * Remove some development artifacts * remove unused and commented lines that are not relevant * Fix the performance in TCP stream factory. Make it create two `tcpReader`(s) per extension * Change a log to debug * Make `*api.CounterPair` a field of `tcpReader` * Set `isTapTarget` to always `true` again since `filterAuthorities` implementation has problems * Remove a variable that's only used for logging even though not introduced by this branch * Bring back the `NumberOfRules` field of `ApplicableRules` struct * Remove the unused `NewEntry` function * Move `k8sResolver == nil` check to a more appropriate place * default healthChecksUserAgentHeaders should be empty array (like the default config value) * remove spam console.log * Rules button cause app to crash (access the service via incorrect property) * Ignore all .env* files in docker build. * Better caching in dockerfile: only copy go.mod before go mod download. * Check for errors while loading an extension * Add a comment about why `Protocol` is not a pointer * Bring back the call to `deleteOlderThan` * Remove the `nil` check * Reduce the maximum allowed AMQP message from 128MB to 1MB * Fix an error that only occurs when a Kafka broker is initiating * Revert the change inb2abd7b990
* Fix the service name resolution in all protocols * Remove the `anydirection` flag and fix the issue in `filterAuthorities` * Pass `sync.Map` by reference to `deleteOlderThan` method * Fix the packet capture issue in standalone mode that's introduced by the removal of `anydirection` * Temporarily resolve the memory exhaustion in AMQP * Fix a nil pointer dereference error * Fix the CLI build error * Fix a memory leak that's identified by `pprof` Co-authored-by: Roee Gadot <roee.gadot@up9.com> Co-authored-by: Nimrod Gilboa Markevich <nimrod@up9.com>
This commit is contained in:
598
tap/extensions/kafka/decode.go
Normal file
598
tap/extensions/kafka/decode.go
Normal file
@@ -0,0 +1,598 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// discarder is implemented by readers (such as *bufio.Reader) that can
// skip bytes without copying them; decoder.discard uses it as a fast path.
type discarder interface {
	Discard(int) (int, error)
}
// decoder reads Kafka protocol primitives from an underlying reader while
// enforcing a byte budget (remain) and latching the first error it sees.
type decoder struct {
	reader io.Reader    // source of the raw bytes
	remain int          // bytes still allowed to be consumed from reader
	buffer [8]byte      // scratch space for fixed-width integer reads
	err    error        // first error encountered; sticky until Reset
	table  *crc32.Table // when non-nil, every byte read is folded into crc32
	crc32  uint32       // running checksum, maintained only while table != nil
}
func (d *decoder) Reset(r io.Reader, n int) {
|
||||
d.reader = r
|
||||
d.remain = n
|
||||
d.buffer = [8]byte{}
|
||||
d.err = nil
|
||||
d.table = nil
|
||||
d.crc32 = 0
|
||||
}
|
||||
|
||||
func (d *decoder) Read(b []byte) (int, error) {
|
||||
if d.err != nil {
|
||||
return 0, d.err
|
||||
}
|
||||
if d.remain == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if len(b) > d.remain {
|
||||
b = b[:d.remain]
|
||||
}
|
||||
n, err := d.reader.Read(b)
|
||||
if n > 0 && d.table != nil {
|
||||
d.crc32 = crc32.Update(d.crc32, d.table, b[:n])
|
||||
}
|
||||
d.remain -= n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (d *decoder) ReadByte() (byte, error) {
|
||||
c := d.readByte()
|
||||
return c, d.err
|
||||
}
|
||||
|
||||
func (d *decoder) done() bool {
|
||||
return d.remain == 0 || d.err != nil
|
||||
}
|
||||
|
||||
func (d *decoder) setCRC(table *crc32.Table) {
|
||||
d.table, d.crc32 = table, 0
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(v value) {
|
||||
v.setBool(d.readBool())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt8(v value) {
|
||||
v.setInt8(d.readInt8())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt16(v value) {
|
||||
v.setInt16(d.readInt16())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt32(v value) {
|
||||
v.setInt32(d.readInt32())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt64(v value) {
|
||||
v.setInt64(d.readInt64())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(v value) {
|
||||
v.setString(d.readString())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCompactString(v value) {
|
||||
v.setString(d.readCompactString())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBytes(v value) {
|
||||
v.setBytes(d.readBytes())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCompactBytes(v value) {
|
||||
v.setBytes(d.readCompactBytes())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
|
||||
if n := d.readInt32(); n < 0 || n > 65535 {
|
||||
v.setArray(array{})
|
||||
} else {
|
||||
a := makeArray(elemType, int(n))
|
||||
for i := 0; i < int(n) && d.remain > 0; i++ {
|
||||
decodeElem(d, a.index(i))
|
||||
}
|
||||
v.setArray(a)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
|
||||
if n := d.readUnsignedVarInt(); n < 1 || n > 65535 {
|
||||
v.setArray(array{})
|
||||
} else {
|
||||
a := makeArray(elemType, int(n-1))
|
||||
for i := 0; i < int(n-1) && d.remain > 0; i++ {
|
||||
decodeElem(d, a.index(i))
|
||||
}
|
||||
v.setArray(a)
|
||||
}
|
||||
}
|
||||
|
||||
// decodeRecordV0 decodes a single Kafka record (modeled by RecordV0)
// field by field and stores the result into v.
//
// NOTE(review): the key/value/header-key/header-value lengths are read as
// zig-zag varints but truncated to int8, and the header count is derived
// as readInt8()/2 — this only matches the varint zig-zag encoding for
// small non-negative values; records with long keys/values or many
// headers would be mis-parsed. TODO confirm against the Kafka record
// batch wire format.
func (d *decoder) decodeRecordV0(v value) {
	x := &RecordV0{}
	x.Unknown = d.readInt8()
	x.Attributes = d.readInt8()
	x.TimestampDelta = d.readInt8()
	x.OffsetDelta = d.readInt8()

	// Key: varint length followed by that many bytes, rebuilt one byte at
	// a time into a string.
	x.KeyLength = int8(d.readVarInt())
	key := strings.Builder{}
	for i := 0; i < int(x.KeyLength); i++ {
		key.WriteString(fmt.Sprintf("%c", d.readInt8()))
	}
	x.Key = key.String()

	// Value: varint length followed by that many bytes.
	x.ValueLen = int8(d.readVarInt())
	value := strings.Builder{}
	for i := 0; i < int(x.ValueLen); i++ {
		value.WriteString(fmt.Sprintf("%c", d.readInt8()))
	}
	x.Value = value.String()

	// Headers: count (zig-zag decoded via the /2), then key/value pairs.
	headerLen := d.readInt8() / 2
	headers := make([]RecordHeader, 0)
	for i := 0; i < int(headerLen); i++ {
		header := &RecordHeader{}

		header.HeaderKeyLength = int8(d.readVarInt())
		headerKey := strings.Builder{}
		for j := 0; j < int(header.HeaderKeyLength); j++ {
			headerKey.WriteString(fmt.Sprintf("%c", d.readInt8()))
		}
		header.HeaderKey = headerKey.String()

		header.HeaderValueLength = int8(d.readVarInt())
		headerValue := strings.Builder{}
		for j := 0; j < int(header.HeaderValueLength); j++ {
			headerValue.WriteString(fmt.Sprintf("%c", d.readInt8()))
		}
		header.Value = headerValue.String()

		headers = append(headers, *header)
	}
	x.Headers = headers

	// Store the fully built record into the destination value.
	v.val.Set(valueOf(x).val)
}
func (d *decoder) discardAll() {
|
||||
d.discard(d.remain)
|
||||
}
|
||||
|
||||
func (d *decoder) discard(n int) {
|
||||
if n > d.remain {
|
||||
n = d.remain
|
||||
}
|
||||
var err error
|
||||
if r, _ := d.reader.(discarder); r != nil {
|
||||
n, err = r.Discard(n)
|
||||
d.remain -= n
|
||||
} else {
|
||||
_, err = io.Copy(ioutil.Discard, d)
|
||||
}
|
||||
d.setError(err)
|
||||
}
|
||||
|
||||
func (d *decoder) read(n int) []byte {
|
||||
b := make([]byte, n)
|
||||
n, err := io.ReadFull(d, b)
|
||||
b = b[:n]
|
||||
d.setError(err)
|
||||
return b
|
||||
}
|
||||
|
||||
// writeTo streams the next n bytes of input into w, temporarily narrowing
// the decoder's byte budget so io.Copy stops at the right place.
//
// If fewer than n bytes could be copied without an underlying error, the
// shortfall is reported as io.ErrUnexpectedEOF. The budget is restored to
// the original remainder minus the bytes actually consumed.
func (d *decoder) writeTo(w io.Writer, n int) {
	limit := d.remain
	if n < limit {
		// Shrink the budget so the copy cannot run past the requested n bytes.
		d.remain = n
	}
	c, err := io.Copy(w, d)
	if int(c) < n && err == nil {
		err = io.ErrUnexpectedEOF
	}
	d.remain = limit - int(c)
	d.setError(err)
}
func (d *decoder) setError(err error) {
|
||||
if d.err == nil && err != nil {
|
||||
d.err = err
|
||||
d.discardAll()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readFull(b []byte) bool {
|
||||
n, err := io.ReadFull(d, b)
|
||||
d.setError(err)
|
||||
return n == len(b)
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() byte {
|
||||
if d.readFull(d.buffer[:1]) {
|
||||
return d.buffer[0]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *decoder) readBool() bool {
|
||||
return d.readByte() != 0
|
||||
}
|
||||
|
||||
func (d *decoder) readInt8() int8 {
|
||||
if d.readFull(d.buffer[:1]) {
|
||||
return decodeReadInt8(d.buffer[:1])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *decoder) readInt16() int16 {
|
||||
if d.readFull(d.buffer[:2]) {
|
||||
return decodeReadInt16(d.buffer[:2])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *decoder) readInt32() int32 {
|
||||
if d.readFull(d.buffer[:4]) {
|
||||
return decodeReadInt32(d.buffer[:4])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *decoder) readInt64() int64 {
|
||||
if d.readFull(d.buffer[:8]) {
|
||||
return decodeReadInt64(d.buffer[:8])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *decoder) readString() string {
|
||||
if n := d.readInt16(); n < 0 {
|
||||
return ""
|
||||
} else {
|
||||
return bytesToString(d.read(int(n)))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readVarString() string {
|
||||
if n := d.readVarInt(); n < 0 {
|
||||
return ""
|
||||
} else {
|
||||
return bytesToString(d.read(int(n)))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readCompactString() string {
|
||||
if n := d.readUnsignedVarInt(); n < 1 {
|
||||
return ""
|
||||
} else {
|
||||
return bytesToString(d.read(int(n - 1)))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readBytes() []byte {
|
||||
if n := d.readInt32(); n < 0 {
|
||||
return nil
|
||||
} else {
|
||||
return d.read(int(n))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readBytesTo(w io.Writer) bool {
|
||||
if n := d.readInt32(); n < 0 {
|
||||
return false
|
||||
} else {
|
||||
d.writeTo(w, int(n))
|
||||
return d.err == nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readVarBytes() []byte {
|
||||
if n := d.readVarInt(); n < 0 {
|
||||
return nil
|
||||
} else {
|
||||
return d.read(int(n))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readVarBytesTo(w io.Writer) bool {
|
||||
if n := d.readVarInt(); n < 0 {
|
||||
return false
|
||||
} else {
|
||||
d.writeTo(w, int(n))
|
||||
return d.err == nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readCompactBytes() []byte {
|
||||
if n := d.readUnsignedVarInt(); n < 1 {
|
||||
return nil
|
||||
} else {
|
||||
return d.read(int(n - 1))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) readCompactBytesTo(w io.Writer) bool {
|
||||
if n := d.readUnsignedVarInt(); n < 1 {
|
||||
return false
|
||||
} else {
|
||||
d.writeTo(w, int(n-1))
|
||||
return d.err == nil
|
||||
}
|
||||
}
|
||||
|
||||
// readVarInt decodes a zig-zag encoded signed varint from the input.
// If no terminating byte (high bit clear) appears within the size bound,
// an error is latched and 0 is returned.
func (d *decoder) readVarInt() int64 {
	n := 11 // varints are at most 11 bytes

	// Never attempt to read past the remaining byte budget.
	if n > d.remain {
		n = d.remain
	}

	x := uint64(0)
	s := uint(0)

	for n > 0 {
		b := d.readByte()

		if (b & 0x80) == 0 {
			// Final byte: assemble the raw value, then zig-zag decode
			// (even raw values map to >= 0, odd to < 0).
			x |= uint64(b) << s
			return int64(x>>1) ^ -(int64(x) & 1)
		}

		// Continuation byte: accumulate the low 7 bits.
		x |= uint64(b&0x7f) << s
		s += 7
		n--
	}

	d.setError(fmt.Errorf("cannot decode varint from input stream"))
	return 0
}
// readUnsignedVarInt decodes an unsigned varint (no zig-zag) from the
// input. If no terminating byte (high bit clear) appears within the size
// bound, an error is latched and 0 is returned.
func (d *decoder) readUnsignedVarInt() uint64 {
	n := 11 // varints are at most 11 bytes

	// Never attempt to read past the remaining byte budget.
	if n > d.remain {
		n = d.remain
	}

	x := uint64(0)
	s := uint(0)

	for n > 0 {
		b := d.readByte()

		if (b & 0x80) == 0 {
			// Final byte: fold in the last 7 bits and return.
			x |= uint64(b) << s
			return x
		}

		// Continuation byte: accumulate the low 7 bits.
		x |= uint64(b&0x7f) << s
		s += 7
		n--
	}

	d.setError(fmt.Errorf("cannot decode unsigned varint from input stream"))
	return 0
}
// decodeFunc is the uniform shape of all decoders produced by
// decodeFuncOf: consume bytes from the decoder and store the result in v.
type decodeFunc func(*decoder, value)

var (
	// Compile-time checks that *decoder satisfies the io interfaces it
	// is used through.
	_ io.Reader     = (*decoder)(nil)
	_ io.ByteReader = (*decoder)(nil)

	// readerFrom identifies types that decode themselves via io.ReaderFrom.
	readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem()
)
func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
|
||||
if reflect.PtrTo(typ).Implements(readerFrom) {
|
||||
return readerDecodeFuncOf(typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return (*decoder).decodeBool
|
||||
case reflect.Int8:
|
||||
return (*decoder).decodeInt8
|
||||
case reflect.Int16:
|
||||
return (*decoder).decodeInt16
|
||||
case reflect.Int32:
|
||||
return (*decoder).decodeInt32
|
||||
case reflect.Int64:
|
||||
return (*decoder).decodeInt64
|
||||
case reflect.String:
|
||||
return stringDecodeFuncOf(flexible, tag)
|
||||
case reflect.Struct:
|
||||
return structDecodeFuncOf(typ, version, flexible)
|
||||
case reflect.Slice:
|
||||
if typ.Elem().Kind() == reflect.Uint8 { // []byte
|
||||
return bytesDecodeFuncOf(flexible, tag)
|
||||
}
|
||||
return arrayDecodeFuncOf(typ, version, flexible, tag)
|
||||
default:
|
||||
panic("unsupported type: " + typ.String())
|
||||
}
|
||||
}
|
||||
|
||||
func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
|
||||
if flexible {
|
||||
// In flexible messages, all strings are compact
|
||||
return (*decoder).decodeCompactString
|
||||
}
|
||||
return (*decoder).decodeString
|
||||
}
|
||||
|
||||
func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
|
||||
if flexible {
|
||||
// In flexible messages, all arrays are compact
|
||||
return (*decoder).decodeCompactBytes
|
||||
}
|
||||
return (*decoder).decodeBytes
|
||||
}
|
||||
|
||||
// structDecodeFuncOf compiles a decodeFunc for a struct type: it walks the
// struct's fields once up front, keeping only those whose version range
// covers the requested version, and returns a closure that decodes them
// in declaration order (plus, for flexible messages, the trailing tagged
// field buffer).
func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc {
	type field struct {
		decode decodeFunc
		index  index
		tagID  int
	}

	var fields []field
	taggedFields := map[int]*field{}

	// RecordV0 has a hand-written decoder; bypass the generic field walk.
	if typ == reflect.TypeOf(RecordV0{}) {
		return (*decoder).decodeRecordV0
	}

	forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
		forEachStructTag(tag, func(tag structTag) bool {
			if tag.MinVersion <= version && version <= tag.MaxVersion {
				f := field{
					decode: decodeFuncOf(typ, version, flexible, tag),
					index:  index,
					tagID:  tag.TagID,
				}

				if tag.TagID < -1 {
					// Normal required field
					fields = append(fields, f)
				} else {
					// Optional tagged field (flexible messages only)
					taggedFields[tag.TagID] = &f
				}
				// Stop at the first tag matching this version.
				return false
			}
			return true
		})
	})

	return func(d *decoder, v value) {
		// Required fields appear on the wire in declaration order.
		for i := range fields {
			f := &fields[i]
			f.decode(d, v.fieldByIndex(f.index))
		}

		if flexible {
			// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
			// for details of tag buffers in "flexible" messages.
			n := int(d.readUnsignedVarInt())

			for i := 0; i < n; i++ {
				tagID := int(d.readUnsignedVarInt())
				size := int(d.readUnsignedVarInt())

				f, ok := taggedFields[tagID]
				if ok {
					f.decode(d, v.fieldByIndex(f.index))
				} else {
					// Unknown tag: skip its payload to stay in sync.
					d.read(size)
				}
			}
		}
	}
}
func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
|
||||
elemType := typ.Elem()
|
||||
elemFunc := decodeFuncOf(elemType, version, flexible, tag)
|
||||
if flexible {
|
||||
// In flexible messages, all arrays are compact
|
||||
return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) }
|
||||
}
|
||||
|
||||
return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) }
|
||||
}
|
||||
|
||||
func readerDecodeFuncOf(typ reflect.Type) decodeFunc {
|
||||
typ = reflect.PtrTo(typ)
|
||||
return func(d *decoder, v value) {
|
||||
if d.err == nil {
|
||||
_, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d)
|
||||
if err != nil {
|
||||
d.setError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// decodeReadInt8 interprets the first byte of b as a signed 8-bit value.
func decodeReadInt8(b []byte) int8 {
	return int8(b[0])
}

// decodeReadInt16 interprets b as a big-endian signed 16-bit value.
func decodeReadInt16(b []byte) int16 {
	u := binary.BigEndian.Uint16(b)
	return int16(u)
}

// decodeReadInt32 interprets b as a big-endian signed 32-bit value.
func decodeReadInt32(b []byte) int32 {
	u := binary.BigEndian.Uint32(b)
	return int32(u)
}

// decodeReadInt64 interprets b as a big-endian signed 64-bit value.
func decodeReadInt64(b []byte) int64 {
	u := binary.BigEndian.Uint64(b)
	return int64(u)
}
// Unmarshal decodes the Kafka wire representation in data, at the given
// protocol API version, into value (a pointer to the destination).
//
// Decode functions are compiled once per (type, version) pair and cached
// in a copy-on-write map published through an atomic.Value, so readers
// never lock; decoder instances are recycled through a sync.Pool.
func Unmarshal(data []byte, version int16, value interface{}) error {
	typ := elemTypeOf(value)
	cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
	key := versionedType{typ: typ, version: version}
	decode := cache[key]

	if decode == nil {
		// Cache miss: compile a decode function for this type/version.
		// TagID -2 marks the root as a normal (non-tagged) field.
		decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{
			MinVersion: -1,
			MaxVersion: -1,
			TagID:      -2,
			Compact:    true,
			Nullable:   true,
		})

		// Publish a fresh copy of the cache including the new entry.
		// Racing writers may redo this work but the result stays correct.
		newCache := make(map[versionedType]decodeFunc, len(cache)+1)
		newCache[key] = decode

		for typ, fun := range cache {
			newCache[typ] = fun
		}

		unmarshalers.Store(newCache)
	}

	// Borrow a decoder from the pool, or make one on first use.
	d, _ := decoders.Get().(*decoder)
	if d == nil {
		d = &decoder{reader: bytes.NewReader(nil)}
	}

	d.remain = len(data)
	r, _ := d.reader.(*bytes.Reader)
	r.Reset(data)

	defer func() {
		// Drop references to data before pooling so it can be collected.
		r.Reset(nil)
		d.Reset(r, 0)
		decoders.Put(d)
	}()

	decode(d, valueOf(value))
	return dontExpectEOF(d.err)
}
var (
	// decoders recycles *decoder instances across Unmarshal calls.
	decoders sync.Pool // *decoder

	// unmarshalers holds the compiled decodeFunc cache, published as an
	// immutable map[versionedType]decodeFunc (copy-on-write on update).
	unmarshalers atomic.Value // map[versionedType]decodeFunc
)
Reference in New Issue
Block a user