Fix the memory leaks in AMQP and Kafka dissectors

M. Mert Yildiran 2021-08-25 19:46:46 +03:00
parent 9b04494931
commit efe8e849e4
No known key found for this signature in database
GPG Key ID: D42ADB236521BF7A
4 changed files with 13 additions and 3 deletions


@@ -9,6 +9,7 @@ import (
 	"bytes"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 	"time"
 )
@@ -54,6 +55,10 @@ func (r *AmqpReader) ReadFrame() (frame frame, err error) {
 	channel := binary.BigEndian.Uint16(scratch[1:3])
 	size := binary.BigEndian.Uint32(scratch[3:7])
+	if size > (1000000 * 128) {
+		return nil, fmt.Errorf("An AMQP message cannot be bigger than 128MB")
+	}
 	switch typ {
 	case frameMethod:
 		if frame, err = r.parseMethodFrame(channel, size); err != nil {
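
The AMQP change bounds the frame size announced in the wire header before any parsing happens: the 32-bit size taken from scratch[3:7] drives how much the frame parsers go on to read, so a corrupt or hostile peer could previously make the dissector buffer an arbitrarily large payload. Below is a minimal, self-contained sketch of the same guard pattern; readFramePayload, maxFrameSize and the demo in main are illustrative names and are not code from the commit.

// Sketch of the bound-before-allocate pattern the hunk applies, assuming the
// frame parsers allocate a buffer of the advertised size. All names here are
// hypothetical; only the 128MB ceiling mirrors the commit.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const maxFrameSize = 1000000 * 128 // same 128MB ceiling as the AMQP check

func readFramePayload(r io.Reader, size uint32) ([]byte, error) {
	if size > maxFrameSize {
		// Reject before allocating; make([]byte, size) would otherwise
		// reserve whatever the peer claims to be sending.
		return nil, fmt.Errorf("frame of %d bytes exceeds the 128MB limit", size)
	}
	payload := make([]byte, size)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	// A header advertising a ~4GB frame is rejected without any allocation.
	header := make([]byte, 7)
	binary.BigEndian.PutUint32(header[3:7], 0xFFFFFFF0)
	size := binary.BigEndian.Uint32(header[3:7])
	_, err := readFramePayload(bytes.NewReader(nil), size)
	fmt.Println(err)
}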


@@ -4,7 +4,6 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
-	"io"
 	"log"
 
 	"github.com/up9inc/mizu/tap/api"
@@ -42,13 +41,11 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, em
 		if isClient {
 			_, _, err := ReadRequest(b, tcpID)
 			if err != nil {
-				io.ReadAll(b)
 				break
 			}
 		} else {
 			err := ReadResponse(b, tcpID, emitter)
 			if err != nil {
-				io.ReadAll(b)
 				break
 			}
 		}
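
On the Kafka side the leak sat in the error path of the dissect loop: io.ReadAll(b) was called to drain whatever was left on the stream, but io.ReadAll accumulates everything it reads into a single slice and keeps reading until the stream ends, which for a long-lived TCP connection is effectively unbounded. The commit simply drops the drain and breaks out of the loop, which is also why the "io" import goes away. The sketch below only illustrates the cost difference between buffering and discarding; it is not code from the repository, and the commit's fix is the removal itself, not a switch to io.Copy.

// Illustrative comparison: io.ReadAll keeps every drained byte alive in one
// slice, while io.Copy into io.Discard throws the bytes away in small chunks.
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	leftover := strings.NewReader(strings.Repeat("x", 8<<20)) // pretend 8MB is still unread

	// Old approach: the whole remainder ends up in memory at once.
	buf, _ := io.ReadAll(leftover)
	fmt.Printf("io.ReadAll kept %d bytes alive\n", len(buf))

	leftover = strings.NewReader(strings.Repeat("x", 8<<20))

	// A constant-memory way to discard, had draining still been needed.
	n, _ := io.Copy(io.Discard, leftover)
	fmt.Printf("io.Copy discarded %d bytes through a small reusable buffer\n", n)
}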


@@ -21,6 +21,10 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID) (apiKey ApiKey, apiVersion int16
 	d := &decoder{reader: r, remain: 4}
 	size := d.readInt32()
+	if size > 1000000 {
+		return 0, 0, fmt.Errorf("A Kafka message cannot be bigger than 1MB")
+	}
 	if err = d.err; err != nil {
 		err = dontExpectEOF(err)
 		return 0, 0, err
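
ReadRequest now rejects any Kafka message whose 4-byte size prefix claims more than 1MB, before the decoder commits to consuming that many bytes. Below is a minimal sketch of the same validate-then-read pattern under the assumption of plain length-prefixed framing; readSizedMessage and maxMessageSize are hypothetical names, and the 1MB figure mirrors the commit's cap rather than any protocol-defined limit.

// Sketch: read a big-endian length prefix, validate it, and only then buffer
// the message body. Names and structure are illustrative, not from the repo.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const maxMessageSize = 1000000 // 1MB, same cap as the Kafka check

func readSizedMessage(r io.Reader) ([]byte, error) {
	var size int32
	if err := binary.Read(r, binary.BigEndian, &size); err != nil {
		return nil, err
	}
	if size < 0 || size > maxMessageSize {
		// A corrupt or adversarial prefix is rejected before the decoder
		// tries to read (and hold) that many bytes.
		return nil, fmt.Errorf("message of %d bytes exceeds the 1MB limit", size)
	}
	msg := make([]byte, size)
	if _, err := io.ReadFull(r, msg); err != nil {
		return nil, err
	}
	return msg, nil
}

func main() {
	var prefix bytes.Buffer
	binary.Write(&prefix, binary.BigEndian, int32(500000000)) // claims 500MB
	_, err := readSizedMessage(&prefix)
	fmt.Println(err)
}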


@@ -19,6 +19,10 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, emitter api.Emitter) (err error
 	d := &decoder{reader: r, remain: 4}
 	size := d.readInt32()
+	if size > 1000000 {
+		return fmt.Errorf("A Kafka message cannot be bigger than 1MB")
+	}
 	if err = d.err; err != nil {
 		err = dontExpectEOF(err)
 		return err
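
ReadResponse gets the identical 1MB bound; the only difference is the single error return value. Both checks sit in front of the decoder that each hunk constructs with remain: 4, i.e. a byte budget of exactly the four size-prefix bytes. The sketch below is a much-reduced guess at that decoder pattern based only on what the diff shows (reader, remain, err, readInt32); the real decoder in the Kafka dissector presumably carries more state and methods.

// Reduced sketch of a budgeted decoder: remain is drawn down by each read and
// err records the first failure so later calls become no-ops. Only the field
// and method names mirror the diff; the behavior here is assumed.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

type decoder struct {
	reader io.Reader
	remain int
	err    error
}

func (d *decoder) readInt32() int32 {
	if d.err != nil || d.remain < 4 {
		if d.err == nil {
			d.err = io.ErrUnexpectedEOF
		}
		return 0
	}
	var buf [4]byte
	if _, err := io.ReadFull(d.reader, buf[:]); err != nil {
		d.err = err
		return 0
	}
	d.remain -= 4
	return int32(binary.BigEndian.Uint32(buf[:]))
}

func main() {
	// The first four bytes of a Kafka message are its size; with remain: 4 the
	// decoder reads exactly that prefix, which the new checks then bound.
	d := &decoder{reader: bytes.NewReader([]byte{0x40, 0x00, 0x00, 0x00}), remain: 4}
	size := d.readInt32()
	fmt.Println(size, size > 1000000, d.err)
}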