Refactor tap module to achieve synchronously closing other protocol dissectors upon identification (#1026)
* Remove `tcpStreamWrapper` struct
* Refactor `tap` module and move some of the code to `tap/api` module
* Move `TrafficFilteringOptions` struct to `shared` module
* Change the `Dissect` method signature to have `*TcpReader` as an argument
* Add `CloseOtherProtocolDissectors` method and use it to synchronously close the other protocol dissectors
* Run `go mod tidy` in `cli` module
* Rename `SuperIdentifier` struct to `ProtoIdentifier`
* Remove `SuperTimer` struct
* Bring back `CloseTimedoutTcpStreamChannels` method
* Run `go mod tidy` everywhere
* Remove `GOGC` environment variable from tapper
* Fix the tests
* Bring back `debug.FreeOSMemory()` call
* Make `CloseOtherProtocolDissectors` method mutexed
* Revert "Remove `GOGC` environment variable from tapper". This reverts commit cfc2484bbb.
* Bring back the removed `checksum`, `nooptcheck` and `ignorefsmerr` flags
* Define a bunch of interfaces and don't export any new structs from `tap/api`
* Keep the interfaces in `tap/api` but move the structs to `tap/tcp`
* Fix the unit tests by depending on `github.com/up9inc/mizu/tap`
* Use the modified `tlsEmitter`
* Define `TlsChunk` interface and make `tlsReader` implement `TcpReader`
* Remove unused fields in `tlsReader`
* Define `ReassemblyStream` interface and separate the `gopacket`-specific fields into a `tcpReassemblyStream` struct, such that `tap/api` doesn't depend on `gopacket`
* Remove the unused fields
* Make `tlsPoller` implement the `TcpStream` interface and remove the call to the `NewTcpStreamDummy` method
* Remove unused fields from `tlsPoller`
* Remove almost all of the setter methods in the `TcpReader` and `TcpStream` interfaces and remove the `TlsChunk` interface
* Revert "Revert "Remove `GOGC` environment variable from tapper"". This reverts commit ab2b9a803b.
* Revert "Bring back `debug.FreeOSMemory()` call". This reverts commit 1cce863bbb.
* Remove excess comment
* Fix acceptance tests (`logger` module) #run_acceptance_tests
* Bring back `github.com/patrickmn/go-cache`
* Fix `NewTcpStream` method signature
* Put `tcpReader` and `tcpStream` mocks into protocol dissectors to remove the `github.com/up9inc/mizu/tap` dependency
* Fix AMQP tests
* Revert 960ba644cd
* Revert `go.mod` and `go.sum` files in protocol dissectors
* Fix the comment position
* Revert `AppStatsInst` change
* Fix indent
* Fix CLI build
* Fix linter error
* Fix error msg
* Revert some of the changes in `chunk.go`
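The central mechanism of the commit is closing the competing protocol dissectors synchronously, the moment one of them identifies the stream (the `CloseOtherProtocolDissectors` idea from the title, realized in the diff below as the mutexed `SetProtocol`). A minimal self-contained sketch of that pattern in Go; every name here is illustrative, not the real `tap/api`:

package main

import (
	"fmt"
	"sync"
)

// Illustrative stand-ins for the real tap/api types.
type protocol struct{ name string }

type reader struct {
	proto  *protocol
	closed bool
}

func (r *reader) close() { r.closed = true }

type stream struct {
	sync.Mutex
	identified   *protocol
	closedOthers bool
	readers      []*reader
}

// setProtocol mirrors the idea behind tcpStream.SetProtocol in this commit:
// under the stream's mutex, record the winning protocol and synchronously
// close every reader that belongs to a different dissector, exactly once.
func (s *stream) setProtocol(p *protocol) {
	s.Lock()
	defer s.Unlock()

	if s.closedOthers {
		return
	}
	s.identified = p

	for _, r := range s.readers {
		if r.proto != p {
			r.close()
		}
	}
	s.closedOthers = true
}

func main() {
	http := &protocol{name: "http"}
	amqp := &protocol{name: "amqp"}
	s := &stream{readers: []*reader{{proto: http}, {proto: amqp}}}

	s.setProtocol(http)
	fmt.Println(s.readers[0].closed, s.readers[1].closed) // false true
	fmt.Println(s.identified.name)                        // http
}

Because the mutex is held for the whole operation, identification and the closing of the competing readers appear atomic to the rest of the tapper.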
@@ -1,202 +1,136 @@
 package tap
 
 import (
-	"encoding/binary"
 	"sync"
 	"time"
 
-	"github.com/google/gopacket"
-	"github.com/google/gopacket/layers" // pulls in all layers decoders
-	"github.com/google/gopacket/reassembly"
 	"github.com/up9inc/mizu/tap/api"
-	"github.com/up9inc/mizu/tap/diagnose"
 )
 
 /* It's a connection (bidirectional)
  * Implements gopacket.reassembly.Stream interface (Accept, ReassembledSG, ReassemblyComplete)
  * ReassembledSG gets called when new reassembled data is ready (i.e. bytes in order, no duplicates, complete)
- * In our implementation, we pass information from ReassembledSG to the tcpReader through a shared channel.
+ * In our implementation, we pass information from ReassembledSG to the TcpReader through a shared channel.
  */
 type tcpStream struct {
 	id              int64
 	isClosed        bool
-	superIdentifier *api.SuperIdentifier
-	tcpstate        *reassembly.TCPSimpleFSM
-	fsmerr          bool
-	optchecker      reassembly.TCPOptionCheck
-	net, transport  gopacket.Flow
-	isDNS           bool
+	protoIdentifier *api.ProtoIdentifier
 	isTapTarget     bool
-	clients         []tcpReader
-	servers         []tcpReader
-	ident           string
+	clients         []api.TcpReader
+	servers         []api.TcpReader
 	origin          api.Capture
 	reqResMatcher   api.RequestResponseMatcher
 	createdAt       time.Time
+	streamsMap      api.TcpStreamMap
 	sync.Mutex
-	streamsMap      *tcpStreamMap
 }
 
-func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
-	// FSM
-	if !t.tcpstate.CheckState(tcp, dir) {
-		diagnose.TapErrors.SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpstate.String())
-		diagnose.InternalStats.RejectFsm++
-		if !t.fsmerr {
-			t.fsmerr = true
-			diagnose.InternalStats.RejectConnFsm++
-		}
-		if !*ignorefsmerr {
-			return false
-		}
-	}
-	// Options
-	err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
-	if err != nil {
-		diagnose.TapErrors.SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
-		diagnose.InternalStats.RejectOpt++
-		if !*nooptcheck {
-			return false
-		}
-	}
-	// Checksum
-	accept := true
-	if *checksum {
-		c, err := tcp.ComputeChecksum()
-		if err != nil {
-			diagnose.TapErrors.SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
-			accept = false
-		} else if c != 0x0 {
-			diagnose.TapErrors.SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
-			accept = false
-		}
-	}
-	if !accept {
-		diagnose.InternalStats.RejectOpt++
-	}
-
-	*start = true
-
-	return accept
-}
-
-func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.AssemblerContext) {
-	dir, _, _, skip := sg.Info()
-	length, saved := sg.Lengths()
-	// update stats
-	sgStats := sg.Stats()
-	if skip > 0 {
-		diagnose.InternalStats.MissedBytes += skip
-	}
-	diagnose.InternalStats.Sz += length - saved
-	diagnose.InternalStats.Pkt += sgStats.Packets
-	if sgStats.Chunks > 1 {
-		diagnose.InternalStats.Reassembled++
-	}
-	diagnose.InternalStats.OutOfOrderPackets += sgStats.QueuedPackets
-	diagnose.InternalStats.OutOfOrderBytes += sgStats.QueuedBytes
-	if length > diagnose.InternalStats.BiggestChunkBytes {
-		diagnose.InternalStats.BiggestChunkBytes = length
-	}
-	if sgStats.Packets > diagnose.InternalStats.BiggestChunkPackets {
-		diagnose.InternalStats.BiggestChunkPackets = sgStats.Packets
-	}
-	if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
-		// In the original example this was handled with panic().
-		// I don't know what this error means or how to handle it properly.
-		diagnose.TapErrors.SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
-	}
-	diagnose.InternalStats.OverlapBytes += sgStats.OverlapBytes
-	diagnose.InternalStats.OverlapPackets += sgStats.OverlapPackets
-
-	if skip == -1 && *allowmissinginit {
-		// this is allowed
-	} else if skip != 0 {
-		// Missing bytes in stream: do not even try to parse it
-		return
-	}
-	data := sg.Fetch(length)
-	if t.isDNS {
-		dns := &layers.DNS{}
-		var decoded []gopacket.LayerType
-		if len(data) < 2 {
-			if len(data) > 0 {
-				sg.KeepFrom(0)
-			}
-			return
-		}
-		dnsSize := binary.BigEndian.Uint16(data[:2])
-		missing := int(dnsSize) - len(data[2:])
-		diagnose.TapErrors.Debug("dnsSize: %d, missing: %d", dnsSize, missing)
-		if missing > 0 {
-			diagnose.TapErrors.Debug("Missing some bytes: %d", missing)
-			sg.KeepFrom(0)
-			return
-		}
-		p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
-		err := p.DecodeLayers(data[2:], &decoded)
-		if err != nil {
-			diagnose.TapErrors.SilentError("DNS-parser", "Failed to decode DNS: %v", err)
-		} else {
-			diagnose.TapErrors.Debug("DNS: %s", gopacket.LayerDump(dns))
-		}
-		if len(data) > 2+int(dnsSize) {
-			sg.KeepFrom(2 + int(dnsSize))
-		}
-	} else if t.isTapTarget {
-		if length > 0 {
-			// This is where we pass the reassembled information onwards
-			// This channel is read by a tcpReader object
-			diagnose.AppStats.IncReassembledTcpPayloadsCount()
-			timestamp := ac.GetCaptureInfo().Timestamp
-			if dir == reassembly.TCPDirClientToServer {
-				for i := range t.clients {
-					reader := &t.clients[i]
-					reader.Lock()
-					if !reader.isClosed {
-						reader.msgQueue <- tcpReaderDataMsg{data, timestamp}
-					}
-					reader.Unlock()
-				}
-			} else {
-				for i := range t.servers {
-					reader := &t.servers[i]
-					reader.Lock()
-					if !reader.isClosed {
-						reader.msgQueue <- tcpReaderDataMsg{data, timestamp}
-					}
-					reader.Unlock()
-				}
-			}
-		}
-	}
-}
+
+func NewTcpStream(isTapTarget bool, streamsMap api.TcpStreamMap, capture api.Capture) api.TcpStream {
+	return &tcpStream{
+		isTapTarget:     isTapTarget,
+		protoIdentifier: &api.ProtoIdentifier{},
+		streamsMap:      streamsMap,
+		origin:          capture,
+	}
+}
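`NewTcpStream` hands callers the `api.TcpStream` interface rather than the unexported struct, matching the commit note "Define a bunch of interfaces and don't export any new structs from `tap/api`". A tiny self-contained sketch of that constructor pattern (illustrative names, not the real API):

package main

import "fmt"

// Stream is the exported interface; the concrete type stays unexported.
type Stream interface {
	GetIsClosed() bool
}

type stream struct{ closed bool }

func (s *stream) GetIsClosed() bool { return s.closed }

// NewStream mirrors NewTcpStream: build the unexported struct but return
// only the interface, so other packages never depend on the struct itself.
func NewStream() Stream {
	return &stream{}
}

func main() {
	s := NewStream()
	fmt.Println(s.GetIsClosed()) // false
}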
 
-func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
-	if t.isTapTarget && !t.isClosed {
-		t.Close()
-	}
-	// do not remove the connection to allow last ACK
-	return false
-}
-
-func (t *tcpStream) Close() {
-	shouldReturn := false
-	t.Lock()
-	if t.isClosed {
-		shouldReturn = true
-	} else {
-		t.isClosed = true
-	}
-	t.Unlock()
-	if shouldReturn {
-		return
-	}
-
-	t.streamsMap.Delete(t.id)
-
-	for i := range t.clients {
-		reader := &t.clients[i]
-		reader.Close()
-	}
-	for i := range t.servers {
-		reader := &t.servers[i]
-		reader.Close()
-	}
-}
+func (t *tcpStream) getId() int64 {
+	return t.id
+}
+
+func (t *tcpStream) setId(id int64) {
+	t.id = id
+}
+
+func (t *tcpStream) close() {
+	t.Lock()
+	defer t.Unlock()
+
+	if t.isClosed {
+		return
+	}
+
+	t.isClosed = true
+
+	t.streamsMap.Delete(t.id)
+
+	for i := range t.clients {
+		reader := t.clients[i]
+		reader.(*tcpReader).close()
+	}
+	for i := range t.servers {
+		reader := t.servers[i]
+		reader.(*tcpReader).close()
+	}
+}
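Note the locking change: the old `Close` set a `shouldReturn` flag, released the mutex, and only then tore the stream down, so teardown ran outside the critical section. The new `close` holds the lock for its entire body via `defer`, so closing can no longer interleave with `SetProtocol` on the same stream. The guard idiom as a self-contained sketch (illustrative type, not mizu code):

package main

import (
	"fmt"
	"sync"
)

type resource struct {
	sync.Mutex
	closed bool
}

// closeOnce holds the lock for the whole teardown: the already-closed
// check, the state change, and the release are one critical section.
func (r *resource) closeOnce() {
	r.Lock()
	defer r.Unlock()

	if r.closed {
		return
	}
	r.closed = true
	fmt.Println("released")
}

func main() {
	r := &resource{}
	r.closeOnce() // prints "released"
	r.closeOnce() // no-op
}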
 
+func (t *tcpStream) addClient(reader api.TcpReader) {
+	t.clients = append(t.clients, reader)
+}
+
+func (t *tcpStream) addServer(reader api.TcpReader) {
+	t.servers = append(t.servers, reader)
+}
+
+func (t *tcpStream) getClients() []api.TcpReader {
+	return t.clients
+}
+
+func (t *tcpStream) getServers() []api.TcpReader {
+	return t.servers
+}
+
+func (t *tcpStream) getClient(index int) api.TcpReader {
+	return t.clients[index]
+}
+
+func (t *tcpStream) getServer(index int) api.TcpReader {
+	return t.servers[index]
+}
+
+func (t *tcpStream) SetProtocol(protocol *api.Protocol) {
+	t.Lock()
+	defer t.Unlock()
+
+	if t.protoIdentifier.IsClosedOthers {
+		return
+	}
+
+	t.protoIdentifier.Protocol = protocol
+
+	for i := range t.clients {
+		reader := t.clients[i]
+		if reader.GetExtension().Protocol != t.protoIdentifier.Protocol {
+			reader.(*tcpReader).close()
+		}
+	}
+	for i := range t.servers {
+		reader := t.servers[i]
+		if reader.GetExtension().Protocol != t.protoIdentifier.Protocol {
+			reader.(*tcpReader).close()
+		}
+	}
+
+	t.protoIdentifier.IsClosedOthers = true
+}
+
+func (t *tcpStream) GetOrigin() api.Capture {
+	return t.origin
+}
+
+func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
+	return t.protoIdentifier
+}
+
+func (t *tcpStream) GetReqResMatcher() api.RequestResponseMatcher {
+	return t.reqResMatcher
+}
+
+func (t *tcpStream) GetIsTapTarget() bool {
+	return t.isTapTarget
+}
+
+func (t *tcpStream) GetIsClosed() bool {
+	return t.isClosed
+}
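Read together with `close`, the `IsClosedOthers` flag makes identification first-come-first-served: whichever dissector calls `SetProtocol` first closes the competition, and any later call is a no-op. Continuing the illustrative sketch from the top of this page:

// First identification wins and synchronously closes the competition.
s := &stream{readers: []*reader{{proto: http}, {proto: amqp}}}
s.setProtocol(http) // closes the amqp reader
s.setProtocol(amqp) // no-op: closedOthers is already true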