Mirror of https://github.com/kubeshark/kubeshark.git
TRA-3212 Passive-Tapper and Mizu share code (#70)
* Use log in tap package instead of fmt.
* Moved api/pkg/tap to root.
* Added go.mod and go.sum for tap.
* Added replace for shared.
* api uses tap module instead of tap package.
* Removed dependency of tap in shared by moving env var out of tap.
* Fixed compilation bugs.
* Fixed: Forgot to export struct field HostMode.
* Removed unused flag.
* Close har output channel when done.
* Moved websocket out of mizu and into passive-tapper.
* Send connection details over har output channel.
* Fixed compilation errors.
* Removed unused info from request response cache.
* Renamed connection -> connectionID.
* Fixed rename bug.
* Export setters and getters for filter ips and ports.
* Added tap dependency to Dockerfile.
* Uncomment error messages.
* Renamed `filterIpAddresses` -> `filterAuthorities`.
* Renamed ConnectionID -> ConnectionInfo.
* Fixed: Missed one replace.

tap/cleaner.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package tap

import (
	"sync"
	"time"

	"github.com/google/gopacket/reassembly"
)

type CleanerStats struct {
	flushed int
	closed  int
	deleted int
}

type Cleaner struct {
	assembler         *reassembly.Assembler
	assemblerMutex    *sync.Mutex
	matcher           *requestResponseMatcher
	cleanPeriod       time.Duration
	connectionTimeout time.Duration
	stats             CleanerStats
	statsMutex        sync.Mutex
}

func (cl *Cleaner) clean() {
	startCleanTime := time.Now()

	cl.assemblerMutex.Lock()
	flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout))
	cl.assemblerMutex.Unlock()

	deleted := cl.matcher.deleteOlderThan(startCleanTime.Add(-cl.connectionTimeout))

	cl.statsMutex.Lock()
	cl.stats.flushed += flushed
	cl.stats.closed += closed
	cl.stats.deleted += deleted
	cl.statsMutex.Unlock()
}

func (cl *Cleaner) start() {
	go func() {
		ticker := time.NewTicker(cl.cleanPeriod)

		for {
			<-ticker.C
			cl.clean()
		}
	}()
}

func (cl *Cleaner) dumpStats() CleanerStats {
	cl.statsMutex.Lock()

	stats := CleanerStats{
		flushed: cl.stats.flushed,
		closed:  cl.stats.closed,
		deleted: cl.stats.deleted,
	}

	cl.stats.flushed = 0
	cl.stats.closed = 0
	cl.stats.deleted = 0

	cl.statsMutex.Unlock()

	return stats
}
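
As an illustration (not part of the diff): a minimal sketch of how a Cleaner is constructed, started, and polled from inside the tap package. The function name and the period/timeout values are made up for the example; the real wiring is in tap/passive_tapper.go further down.

package tap

import (
	"log"
	"sync"
	"time"

	"github.com/google/gopacket/reassembly"
)

// exampleCleanerWiring is illustrative only: it mirrors the wiring done in
// startPassiveTapper, using the package-level reqResMatcher defined there.
func exampleCleanerWiring(streamPool *reassembly.StreamPool) {
	var assemblerMutex sync.Mutex
	assembler := reassembly.NewAssembler(streamPool)

	cleaner := Cleaner{
		assembler:         assembler,
		assemblerMutex:    &assemblerMutex,
		matcher:           &reqResMatcher,   // request/response matcher from passive_tapper.go
		cleanPeriod:       10 * time.Second, // example values
		connectionTimeout: 2 * time.Minute,
	}
	cleaner.start() // launches the periodic clean() goroutine

	// A stats loop can later read and reset the counters:
	s := cleaner.dumpStats()
	log.Printf("flushed: %d, closed: %d, deleted: %d", s.flushed, s.closed, s.deleted)
}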

tap/go.mod (new file, 12 lines)
@@ -0,0 +1,12 @@
module github.com/up9inc/mizu/tap

go 1.16

require (
	github.com/google/gopacket v1.1.19
	github.com/google/martian v2.1.0+incompatible
	github.com/gorilla/websocket v1.4.2
	github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231
	github.com/patrickmn/go-cache v2.1.0+incompatible
	golang.org/x/net v0.0.0-20210421230115-4e50805a0758
)

tap/go.sum (new file, 31 lines)
@@ -0,0 +1,31 @@
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc h1:Ak86L+yDSOzKFa7WM5bf5itSOo1e3Xh8bm5YCMUXIjQ=
github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

tap/grpc_assembler.go (new file, 241 lines)
@@ -0,0 +1,241 @@
package tap

import (
	"bufio"
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"errors"
	"io"
	"math"
	"net/http"
	"net/url"
	"strings"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

const frameHeaderLen = 9

var clientPreface = []byte(http2.ClientPreface)

const initialHeaderTableSize = 4096
const protoHTTP2 = "HTTP/2.0"
const protoMajorHTTP2 = 2
const protoMinorHTTP2 = 0

var maxHTTP2DataLen int = maxHTTP2DataLenDefault // overridden at startup from the HTTP2_DATA_SIZE_LIMIT env var

type messageFragment struct {
	headers []hpack.HeaderField
	data    []byte
}

type fragmentsByStream map[uint32]*messageFragment

func (fbs *fragmentsByStream) appendFrame(streamID uint32, frame http2.Frame) {
	switch frame := frame.(type) {
	case *http2.MetaHeadersFrame:
		if existingFragment, ok := (*fbs)[streamID]; ok {
			existingFragment.headers = append(existingFragment.headers, frame.Fields...)
		} else {
			// new fragment
			(*fbs)[streamID] = &messageFragment{headers: frame.Fields}
		}
	case *http2.DataFrame:
		newDataLen := len(frame.Data())
		if existingFragment, ok := (*fbs)[streamID]; ok {
			existingDataLen := len(existingFragment.data)
			// Never save more than maxHTTP2DataLen bytes
			numBytesToAppend := int(math.Min(float64(maxHTTP2DataLen-existingDataLen), float64(newDataLen)))

			existingFragment.data = append(existingFragment.data, frame.Data()[:numBytesToAppend]...)
		} else {
			// new fragment
			// In principle, this should not happen with DATA frames, because they are always preceded by HEADERS

			// Never save more than maxHTTP2DataLen bytes
			numBytesToAppend := int(math.Min(float64(maxHTTP2DataLen), float64(newDataLen)))

			(*fbs)[streamID] = &messageFragment{data: frame.Data()[:numBytesToAppend]}
		}
	}
}

func (fbs *fragmentsByStream) pop(streamID uint32) ([]hpack.HeaderField, []byte) {
	headers := (*fbs)[streamID].headers
	data := (*fbs)[streamID].data
	delete(*fbs, streamID)

	return headers, data
}

func createGrpcAssembler(b *bufio.Reader) GrpcAssembler {
	var framerOutput bytes.Buffer
	framer := http2.NewFramer(&framerOutput, b)
	framer.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	return GrpcAssembler{
		fragmentsByStream: make(fragmentsByStream),
		framer:            framer,
	}
}

type GrpcAssembler struct {
	fragmentsByStream fragmentsByStream
	framer            *http2.Framer
}

func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) {
	// Exactly one Framer is used for each half connection
	// (instead of creating a new Framer for each ReadFrame operation).
	// This is needed in order to decompress the headers,
	// because the compression context is updated with each request/response.
	frame, err := ga.framer.ReadFrame()
	if err != nil {
		return 0, nil, err
	}

	streamID := frame.Header().StreamID

	ga.fragmentsByStream.appendFrame(streamID, frame)

	if !(ga.isStreamEnd(frame)) {
		return 0, nil, nil
	}

	headers, data := ga.fragmentsByStream.pop(streamID)

	// Note: header keys are converted by http.Header.Set to canonical names, e.g. content-type -> Content-Type.
	// By converting the keys we violate the HTTP/2 specification, which states that all headers must be lowercase.
	headersHTTP1 := make(http.Header)
	for _, header := range headers {
		headersHTTP1.Add(header.Name, header.Value)
	}
	dataString := base64.StdEncoding.EncodeToString(data)

	// Use HTTP/1 types only because they are expected in http_matcher.
	// TODO: Create an interface that will be used by http_matcher:registerRequest and http_matcher:registerResponse
	// to accept both HTTP/1.x and HTTP/2 requests and responses
	var messageHTTP1 interface{}
	if _, ok := headersHTTP1[":method"]; ok {
		messageHTTP1 = http.Request{
			URL:           &url.URL{},
			Method:        "POST",
			Header:        headersHTTP1,
			Proto:         protoHTTP2,
			ProtoMajor:    protoMajorHTTP2,
			ProtoMinor:    protoMinorHTTP2,
			Body:          io.NopCloser(strings.NewReader(dataString)),
			ContentLength: int64(len(dataString)),
		}
	} else if _, ok := headersHTTP1[":status"]; ok {
		messageHTTP1 = http.Response{
			Header:        headersHTTP1,
			Proto:         protoHTTP2,
			ProtoMajor:    protoMajorHTTP2,
			ProtoMinor:    protoMinorHTTP2,
			Body:          io.NopCloser(strings.NewReader(dataString)),
			ContentLength: int64(len(dataString)),
		}
	} else {
		return 0, nil, errors.New("failed to assemble stream: neither a request nor a response")
	}

	return streamID, messageHTTP1, nil
}

func (ga *GrpcAssembler) isStreamEnd(frame http2.Frame) bool {
	switch frame := frame.(type) {
	case *http2.MetaHeadersFrame:
		if frame.StreamEnded() {
			return true
		}
	case *http2.DataFrame:
		if frame.StreamEnded() {
			return true
		}
	}

	return false
}

// Check if the connection is HTTP/2. Remove the HTTP/2 client preface from the start of the buffer if present.
func checkIsHTTP2Connection(b *bufio.Reader, isClient bool) (bool, error) {
	if isClient {
		return checkIsHTTP2ClientStream(b)
	}

	return checkIsHTTP2ServerStream(b)
}

func prepareHTTP2Connection(b *bufio.Reader, isClient bool) error {
	if !isClient {
		return nil
	}

	return discardClientPreface(b)
}

func checkIsHTTP2ClientStream(b *bufio.Reader) (bool, error) {
	return checkClientPreface(b)
}

func checkIsHTTP2ServerStream(b *bufio.Reader) (bool, error) {
	buf, err := b.Peek(frameHeaderLen)
	if err != nil {
		return false, err
	}

	// If the response starts with this text, it is HTTP/1.x
	if bytes.Equal(buf, []byte("HTTP/1.0 ")) || bytes.Equal(buf, []byte("HTTP/1.1 ")) {
		return false, nil
	}

	// Check server connection preface (a SETTINGS frame)
	frameHeader := http2.FrameHeader{
		Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:     http2.FrameType(buf[3]),
		Flags:    http2.Flags(buf[4]),
		StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
	}

	if frameHeader.Type != http2.FrameSettings {
		// An HTTP/2 stream that is not at its start will also fulfill this condition.
		return false, nil
	}

	return true, nil
}

func checkClientPreface(b *bufio.Reader) (bool, error) {
	bytesStart, err := b.Peek(len(clientPreface))
	if err != nil {
		return false, err
	} else if len(bytesStart) != len(clientPreface) {
		return false, errors.New("checkClientPreface: not enough bytes read")
	}

	if !bytes.Equal(bytesStart, clientPreface) {
		return false, nil
	}

	return true, nil
}

func discardClientPreface(b *bufio.Reader) error {
	if isClientPrefacePresent, err := checkClientPreface(b); err != nil {
		return err
	} else if !isClientPrefacePresent {
		return errors.New("discardClientPreface: does not begin with client preface")
	}

	// Remove the client preface string from the buffer
	n, err := b.Discard(len(clientPreface))
	if err != nil {
		return err
	} else if n != len(clientPreface) {
		return errors.New("discardClientPreface: failed to discard client preface")
	}

	return nil
}
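
As an illustration (not part of the diff): how the helpers above are driven for one half-connection, mirroring what httpReader.run does in tap/http_reader.go. The reader r and the function name are assumptions made for the example.

package tap

import (
	"bufio"
	"io"
)

// exampleConsumeHTTP2 detects HTTP/2 on a half-connection and assembles its streams.
// r carries the reassembled TCP payload of that half-connection; isClient marks the
// client-to-server direction.
func exampleConsumeHTTP2(r io.Reader, isClient bool) error {
	b := bufio.NewReader(r)

	isHTTP2, err := checkIsHTTP2Connection(b, isClient) // peeks only, does not consume
	if err != nil || !isHTTP2 {
		return err
	}
	if err := prepareHTTP2Connection(b, isClient); err != nil { // discards the client preface
		return err
	}

	assembler := createGrpcAssembler(b)
	for {
		streamID, message, err := assembler.readMessage()
		if err != nil {
			return err // io.EOF when the half-connection ends
		}
		if message == nil {
			continue // frame buffered, stream has not ended yet
		}
		// message is an http.Request or http.Response synthesized from the HTTP/2 frames.
		_ = streamID
	}
}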

tap/har_writer.go (new file, 255 lines)
@@ -0,0 +1,255 @@
package tap

import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/google/martian/har"
)

const readPermission = 0644
const tempFilenamePrefix = "har_writer"

type PairChanItem struct {
	Request         *http.Request
	RequestTime     time.Time
	Response        *http.Response
	ResponseTime    time.Time
	RequestSenderIp string
	ConnectionInfo  *ConnectionInfo
}

func openNewHarFile(filename string) *HarFile {
	file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, readPermission)
	if err != nil {
		log.Panicf("Failed to open output file: %s (%v,%+v)", err, err, err)
	}

	harFile := HarFile{file: file, entryCount: 0}
	harFile.writeHeader()

	return &harFile
}

type HarFile struct {
	file       *os.File
	entryCount int
}

func NewEntry(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time) (*har.Entry, error) {
	harRequest, err := har.NewRequest(request, true)
	if err != nil {
		SilentError("convert-request-to-har", "Failed converting request to HAR %s (%v,%+v)", err, err, err)
		return nil, errors.New("Failed converting request to HAR")
	}

	harResponse, err := har.NewResponse(response, true)
	if err != nil {
		SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)", err, err, err)
		return nil, errors.New("Failed converting response to HAR")
	}

	if harRequest.PostData != nil && strings.HasPrefix(harRequest.PostData.MimeType, "application/grpc") {
		// Force HTTP/2 gRPC into the HAR template

		harRequest.URL = fmt.Sprintf("%s://%s%s", request.Header.Get(":scheme"), request.Header.Get(":authority"), request.Header.Get(":path"))

		status, err := strconv.Atoi(response.Header.Get(":status"))
		if err != nil {
			SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)", err, err, err)
			return nil, errors.New("Failed converting response status to int for HAR")
		}
		harResponse.Status = status
	} else {
		// Martian copies http.Request.URL.String() to har.Request.URL, which usually contains only the path.
		// However, according to the HAR spec, the URL field needs to be the absolute URL.
		var scheme string
		if request.URL.Scheme != "" {
			scheme = request.URL.Scheme
		} else {
			scheme = "http"
		}
		harRequest.URL = fmt.Sprintf("%s://%s%s", scheme, request.Host, request.URL)
	}

	totalTime := responseTime.Sub(requestTime).Round(time.Millisecond).Milliseconds()
	if totalTime < 1 {
		totalTime = 1
	}

	harEntry := har.Entry{
		StartedDateTime: time.Now().UTC(),
		Time:            totalTime,
		Request:         harRequest,
		Response:        harResponse,
		Cache:           &har.Cache{},
		Timings: &har.Timings{
			Send:    -1,
			Wait:    -1,
			Receive: totalTime,
		},
	}

	return &harEntry, nil
}

func (f *HarFile) WriteEntry(harEntry *har.Entry) {
	harEntryJson, err := json.Marshal(harEntry)
	if err != nil {
		SilentError("har-entry-marshal", "Failed converting har entry object to JSON %s (%v,%+v)", err, err, err)
		return
	}

	var separator string
	if f.GetEntryCount() > 0 {
		separator = ","
	} else {
		separator = ""
	}

	harEntryString := append([]byte(separator), harEntryJson...)

	if _, err := f.file.Write(harEntryString); err != nil {
		log.Panicf("Failed to write to output file: %s (%v,%+v)", err, err, err)
	}

	f.entryCount++
}

func (f *HarFile) GetEntryCount() int {
	return f.entryCount
}

func (f *HarFile) Close() {
	f.writeTrailer()

	err := f.file.Close()
	if err != nil {
		log.Panicf("Failed to close output file: %s (%v,%+v)", err, err, err)
	}
}

func (f *HarFile) writeHeader() {
	header := []byte(`{"log": {"version": "1.2", "creator": {"name": "Mizu", "version": "0.0.1"}, "entries": [`)
	if _, err := f.file.Write(header); err != nil {
		log.Panicf("Failed to write header to output file: %s (%v,%+v)", err, err, err)
	}
}

func (f *HarFile) writeTrailer() {
	trailer := []byte("]}}")
	if _, err := f.file.Write(trailer); err != nil {
		log.Panicf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err)
	}
}

func NewHarWriter(outputDir string, maxEntries int) *HarWriter {
	return &HarWriter{
		OutputDirPath: outputDir,
		MaxEntries:    maxEntries,
		PairChan:      make(chan *PairChanItem),
		OutChan:       make(chan *OutputChannelItem, 1000),
		currentFile:   nil,
		done:          make(chan bool),
	}
}

type OutputChannelItem struct {
	HarEntry       *har.Entry
	ConnectionInfo *ConnectionInfo
}

type HarWriter struct {
	OutputDirPath string
	MaxEntries    int
	PairChan      chan *PairChanItem
	OutChan       chan *OutputChannelItem
	currentFile   *HarFile
	done          chan bool
}

func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, connectionInfo *ConnectionInfo) {
	hw.PairChan <- &PairChanItem{
		Request:        request,
		RequestTime:    requestTime,
		Response:       response,
		ResponseTime:   responseTime,
		ConnectionInfo: connectionInfo,
	}
}

func (hw *HarWriter) Start() {
	if hw.OutputDirPath != "" {
		if err := os.MkdirAll(hw.OutputDirPath, os.ModePerm); err != nil {
			log.Panicf("Failed to create output directory: %s (%v,%+v)", err, err, err)
		}
	}

	go func() {
		for pair := range hw.PairChan {
			harEntry, err := NewEntry(pair.Request, pair.RequestTime, pair.Response, pair.ResponseTime)
			if err != nil {
				continue
			}

			if hw.OutputDirPath != "" {
				if hw.currentFile == nil {
					hw.openNewFile()
				}

				hw.currentFile.WriteEntry(harEntry)

				if hw.currentFile.GetEntryCount() >= hw.MaxEntries {
					hw.closeFile()
				}
			} else {
				hw.OutChan <- &OutputChannelItem{
					HarEntry:       harEntry,
					ConnectionInfo: pair.ConnectionInfo,
				}
			}
		}

		if hw.currentFile != nil {
			hw.closeFile()
		}
		hw.done <- true
	}()
}

func (hw *HarWriter) Stop() {
	close(hw.PairChan)
	<-hw.done
	close(hw.OutChan)
}

func (hw *HarWriter) openNewFile() {
	filename := filepath.Join(os.TempDir(), fmt.Sprintf("%s_%d", tempFilenamePrefix, time.Now().UnixNano()))
	hw.currentFile = openNewHarFile(filename)
}

func (hw *HarWriter) closeFile() {
	hw.currentFile.Close()
	tmpFilename := hw.currentFile.file.Name()
	hw.currentFile = nil

	filename := buildFilename(hw.OutputDirPath, time.Now())
	err := os.Rename(tmpFilename, filename)
	if err != nil {
		SilentError("Rename-file", "cannot rename file: %s (%v,%+v)", err, err, err)
	}
}

func buildFilename(dir string, t time.Time) string {
	// (epoch time in nanoseconds)__(YYYY_Month_DD__hh-mm-ss).har
	filename := fmt.Sprintf("%d__%s.har", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"))
	return filepath.Join(dir, filename)
}
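
As an illustration (not part of the diff): the two modes of HarWriter. With a non-empty output directory it rotates .har files on disk; with an empty directory it emits OutputChannelItem values on OutChan instead, which is how the tapper hands entries to the embedding process. The directory, entry limit and timings below are example values.

package tap

import (
	"net/http"
	"time"
)

// exampleHarWriterUsage shows the producer-side API of HarWriter.
func exampleHarWriterUsage(req *http.Request, res *http.Response, info *ConnectionInfo) {
	hw := NewHarWriter("/tmp/har", 200) // pass "" as the directory to use OutChan instead of files
	hw.Start()

	reqTime := time.Now()
	resTime := reqTime.Add(15 * time.Millisecond)
	hw.WritePair(req, reqTime, res, resTime, info)

	hw.Stop() // closes PairChan, waits for the writer goroutine, then closes OutChan
}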

tap/http_matcher.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package tap

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/orcaman/concurrent-map"
)

type requestResponsePair struct {
	Request  httpMessage `json:"request"`
	Response httpMessage `json:"response"`
}

type ConnectionInfo struct {
	ClientIP   string
	ClientPort string
	ServerIP   string
	ServerPort string
}

type httpMessage struct {
	isRequest      bool
	captureTime    time.Time
	orig           interface{}
	connectionInfo ConnectionInfo
}

// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}
type requestResponseMatcher struct {
	openMessagesMap cmap.ConcurrentMap
}

func createResponseRequestMatcher() requestResponseMatcher {
	newMatcher := &requestResponseMatcher{openMessagesMap: cmap.New()}
	return *newMatcher
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time) *requestResponsePair {
	split := splitIdent(ident)
	key := genKey(split)

	connectionInfo := &ConnectionInfo{
		ClientIP:   split[0],
		ClientPort: split[2],
		ServerIP:   split[1],
		ServerPort: split[3],
	}

	requestHTTPMessage := httpMessage{
		isRequest:      true,
		captureTime:    captureTime,
		orig:           request,
		connectionInfo: *connectionInfo,
	}

	if response, found := matcher.openMessagesMap.Pop(key); found {
		// Type assertion always succeeds because all of the map's values are of httpMessage type
		responseHTTPMessage := response.(*httpMessage)
		if responseHTTPMessage.isRequest {
			SilentError("Request-Duplicate", "Got duplicate request with same identifier")
			return nil
		}
		Debug("Matched open Response for %s", key)
		return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage)
	}

	matcher.openMessagesMap.Set(key, &requestHTTPMessage)
	Debug("Registered open Request for %s", key)
	return nil
}

func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time) *requestResponsePair {
	split := splitIdent(ident)
	key := genKey(split)

	responseHTTPMessage := httpMessage{
		isRequest:   false,
		captureTime: captureTime,
		orig:        response,
	}

	if request, found := matcher.openMessagesMap.Pop(key); found {
		// Type assertion always succeeds because all of the map's values are of httpMessage type
		requestHTTPMessage := request.(*httpMessage)
		if !requestHTTPMessage.isRequest {
			SilentError("Response-Duplicate", "Got duplicate response with same identifier")
			return nil
		}
		Debug("Matched open Request for %s", key)
		return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage)
	}

	matcher.openMessagesMap.Set(key, &responseHTTPMessage)
	Debug("Registered open Response for %s", key)
	return nil
}

func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) *requestResponsePair {
	return &requestResponsePair{
		Request:  *requestHTTPMessage,
		Response: *responseHTTPMessage,
	}
}

func splitIdent(ident string) []string {
	ident = strings.Replace(ident, "->", " ", -1)
	return strings.Split(ident, " ")
}

func genKey(split []string) string {
	key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
	return key
}

func (matcher *requestResponseMatcher) deleteOlderThan(t time.Time) int {
	keysToPop := make([]string, 0)
	for item := range matcher.openMessagesMap.IterBuffered() {
		// Map only contains values of type httpMessage
		message, _ := item.Val.(*httpMessage)

		if message.captureTime.Before(t) {
			keysToPop = append(keysToPop, item.Key)
		}
	}

	numDeleted := len(keysToPop)

	for _, key := range keysToPop {
		_, _ = matcher.openMessagesMap.Pop(key)
	}

	return numDeleted
}
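
As an illustration (not part of the diff): how the matcher pairs a request with its response. The ident format "srcIP->dstIP srcPort->dstPort counter" is the one built in tap/http_reader.go; the addresses and ports are made up. The server-side reader swaps src and dst when it builds its ident, so both directions of a connection produce the same client-to-server key.

package tap

import (
	"net/http"
	"time"
)

// exampleMatcherUsage registers one request and one response that map to the same key.
func exampleMatcherUsage(req *http.Request, res *http.Response) {
	matcher := createResponseRequestMatcher()

	// Request seen on the client->server direction.
	if pair := matcher.registerRequest("10.0.0.7->10.0.0.8 51234->80 1", req, time.Now()); pair != nil {
		// Non-nil only if the response was registered first.
	}

	// Response seen on the server->client direction; its ident is already flipped
	// back to client->server order by the reader, so it maps to the same key.
	if pair := matcher.registerResponse("10.0.0.7->10.0.0.8 51234->80 1", res, time.Now()); pair != nil {
		// Matched: pair.Request and pair.Response carry both halves plus ConnectionInfo.
	}
}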

tap/http_reader.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package tap

import (
	"bufio"
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"sync"
	"time"
)

type httpReaderDataMsg struct {
	bytes     []byte
	timestamp time.Time
}

type tcpID struct {
	srcIP   string
	dstIP   string
	srcPort string
	dstPort string
}

func (tid *tcpID) String() string {
	return fmt.Sprintf("%s->%s %s->%s", tid.srcIP, tid.dstIP, tid.srcPort, tid.dstPort)
}

/* httpReader reads TCP payload bytes from a channel and parses them into HTTP/1 requests and responses.
 * The payload is written to the channel by a tcpStream object that is dedicated to one TCP connection.
 * An httpReader object is unidirectional: it parses either a client stream or a server stream.
 * Implements the io.Reader interface (Read).
 */
type httpReader struct {
	ident         string
	tcpID         tcpID
	isClient      bool
	isHTTP2       bool
	msgQueue      chan httpReaderDataMsg // Channel of captured reassembled TCP payload
	data          []byte
	captureTime   time.Time
	hexdump       bool
	parent        *tcpStream
	grpcAssembler GrpcAssembler
	messageCount  uint
	harWriter     *HarWriter
}

func (h *httpReader) Read(p []byte) (int, error) {
	var msg httpReaderDataMsg
	ok := true
	for ok && len(h.data) == 0 {
		msg, ok = <-h.msgQueue
		h.data = msg.bytes
		h.captureTime = msg.timestamp
	}
	if !ok || len(h.data) == 0 {
		return 0, io.EOF
	}

	l := copy(p, h.data)
	h.data = h.data[l:]
	return l, nil
}

func (h *httpReader) run(wg *sync.WaitGroup) {
	defer wg.Done()
	b := bufio.NewReader(h)

	if isHTTP2, err := checkIsHTTP2Connection(b, h.isClient); err != nil {
		SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)", h.ident, err, err, err)
		// Do something?
	} else {
		h.isHTTP2 = isHTTP2
	}

	if h.isHTTP2 {
		err := prepareHTTP2Connection(b, h.isClient)
		if err != nil {
			SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
		}
		h.grpcAssembler = createGrpcAssembler(b)
	}

	for {
		if h.isHTTP2 {
			err := h.handleHTTP2Stream()
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				break
			} else if err != nil {
				SilentError("HTTP/2", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
				continue
			}
		} else if h.isClient {
			err := h.handleHTTP1ClientStream(b)
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				break
			} else if err != nil {
				SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)", h.ident, err, err, err)
				continue
			}
		} else {
			err := h.handleHTTP1ServerStream(b)
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				break
			} else if err != nil {
				SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)", h.ident, err, err, err)
				continue
			}
		}
	}
}

func (h *httpReader) handleHTTP2Stream() error {
	streamID, messageHTTP1, err := h.grpcAssembler.readMessage()
	h.messageCount++
	if err != nil {
		return err
	}

	var reqResPair *requestResponsePair

	switch messageHTTP1 := messageHTTP1.(type) {
	case http.Request:
		ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, streamID)
		reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime)
	case http.Response:
		ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, streamID)
		reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime)
	}

	if reqResPair != nil {
		statsTracker.incMatchedMessages()

		if h.harWriter != nil {
			h.harWriter.WritePair(
				reqResPair.Request.orig.(*http.Request),
				reqResPair.Request.captureTime,
				reqResPair.Response.orig.(*http.Response),
				reqResPair.Response.captureTime,
				&reqResPair.Request.connectionInfo,
			)
		}
	}

	return nil
}

func (h *httpReader) handleHTTP1ClientStream(b *bufio.Reader) error {
	req, err := http.ReadRequest(b)
	h.messageCount++
	if err != nil {
		return err
	}
	body, err := ioutil.ReadAll(req.Body)
	req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
	s := len(body)
	if err != nil {
		SilentError("HTTP-request-body", "stream %s Got body err: %s", h.ident, err)
	} else if h.hexdump {
		Info("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
	}
	if err := req.Body.Close(); err != nil {
		SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s", h.ident, err)
	}
	encoding := req.Header["Content-Encoding"]
	Info("HTTP/1 Request: %s %s %s (Body:%d) -> %s", h.ident, req.Method, req.URL, s, encoding)

	ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, h.messageCount)
	reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime)
	if reqResPair != nil {
		statsTracker.incMatchedMessages()

		if h.harWriter != nil {
			h.harWriter.WritePair(
				reqResPair.Request.orig.(*http.Request),
				reqResPair.Request.captureTime,
				reqResPair.Response.orig.(*http.Response),
				reqResPair.Response.captureTime,
				&reqResPair.Request.connectionInfo,
			)
		}
	}

	h.parent.Lock()
	h.parent.urls = append(h.parent.urls, req.URL.String())
	h.parent.Unlock()

	return nil
}

func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error {
	res, err := http.ReadResponse(b, nil)
	h.messageCount++
	var req string
	h.parent.Lock()
	if len(h.parent.urls) == 0 {
		req = "<no-request-seen>"
	} else {
		req, h.parent.urls = h.parent.urls[0], h.parent.urls[1:]
	}
	h.parent.Unlock()
	if err != nil {
		return err
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
	s := len(body)
	if err != nil {
		SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s", h.ident, s, err)
	}
	if h.hexdump {
		Info("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
	}
	if err := res.Body.Close(); err != nil {
		SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s", h.ident, s, err)
	}
	sym := ","
	if res.ContentLength > 0 && res.ContentLength != int64(s) {
		sym = "!="
	}
	contentType, ok := res.Header["Content-Type"]
	if !ok {
		contentType = []string{http.DetectContentType(body)}
	}
	encoding := res.Header["Content-Encoding"]
	Info("HTTP/1 Response: %s %s URL:%s (%d%s%d%s) -> %s", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding)

	ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, h.messageCount)
	reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime)
	if reqResPair != nil {
		statsTracker.incMatchedMessages()

		if h.harWriter != nil {
			h.harWriter.WritePair(
				reqResPair.Request.orig.(*http.Request),
				reqResPair.Request.captureTime,
				reqResPair.Response.orig.(*http.Response),
				reqResPair.Response.captureTime,
				&reqResPair.Request.connectionInfo,
			)
		}
	}

	return nil
}
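
As an illustration (not part of the diff, and hedged since tcpStream.ReassembledSG is not shown in this excerpt): httpReader implements io.Reader over a channel of reassembled payload chunks, so http.ReadRequest and http.ReadResponse can parse straight from it. The feeding side presumably looks roughly like this.

package tap

import "time"

// exampleFeedReader pushes one reassembled chunk into a reader and then ends the stream.
// It assumes reader.run(...) is already consuming on the other side, since msgQueue is unbuffered.
func exampleFeedReader(reader *httpReader, payload []byte, seen time.Time) {
	reader.msgQueue <- httpReaderDataMsg{bytes: payload, timestamp: seen}

	// Closing the channel makes reader.Read return io.EOF, which ends run().
	close(reader.msgQueue)
}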

tap/net_utils.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package tap

import (
	"net"
	"strings"
)

var privateIPBlocks []*net.IPNet

func init() {
	initPrivateIPBlocks()
}

// Get this host's IPv4 and IPv6 addresses on all interfaces
func getLocalhostIPs() ([]string, error) {
	addrMasks, err := net.InterfaceAddrs()
	if err != nil {
		// TODO: return error, log error
		return nil, err
	}

	myIPs := make([]string, len(addrMasks))
	for ii, addr := range addrMasks {
		myIPs[ii] = strings.Split(addr.String(), "/")[0]
	}

	return myIPs, nil
}

func isPrivateIP(ipStr string) bool {
	ip := net.ParseIP(ipStr)
	if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
		return true
	}

	for _, block := range privateIPBlocks {
		if block.Contains(ip) {
			return true
		}
	}
	return false
}

func initPrivateIPBlocks() {
	for _, cidr := range []string{
		"127.0.0.0/8",    // IPv4 loopback
		"10.0.0.0/8",     // RFC1918
		"172.16.0.0/12",  // RFC1918
		"192.168.0.0/16", // RFC1918
		"169.254.0.0/16", // RFC3927 link-local
		"::1/128",        // IPv6 loopback
		"fe80::/10",      // IPv6 link-local
		"fc00::/7",       // IPv6 unique local addr
	} {
		_, block, err := net.ParseCIDR(cidr)
		if err != nil {
			Error("Private-IP-Block-Parse", "parse error on %q: %v", cidr, err)
		} else {
			privateIPBlocks = append(privateIPBlocks, block)
		}
	}
}
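
As an illustration (not part of the diff): what the helpers above return for a few inputs. The addresses are example values; 93.184.216.34 is simply a well-known public address.

package tap

import "log"

func exampleNetUtils() {
	if ips, err := getLocalhostIPs(); err == nil {
		log.Printf("own addresses: %v", ips)
	}

	log.Println(isPrivateIP("10.12.0.5"))     // true: RFC1918 range
	log.Println(isPrivateIP("93.184.216.34")) // false: public address
}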

tap/outboundlinks.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package tap

type OutboundLink struct {
	Src     string
	DstIP   string
	DstPort int
}

func NewOutboundLinkWriter() *OutboundLinkWriter {
	return &OutboundLinkWriter{
		OutChan: make(chan *OutboundLink),
	}
}

type OutboundLinkWriter struct {
	OutChan chan *OutboundLink
}

func (olw *OutboundLinkWriter) WriteOutboundLink(src string, DstIP string, DstPort int) {
	olw.OutChan <- &OutboundLink{
		Src:     src,
		DstIP:   DstIP,
		DstPort: DstPort,
	}
}

func (olw *OutboundLinkWriter) Stop() {
	close(olw.OutChan)
}
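
As an illustration (not part of the diff): producing and consuming outbound-link records. Per the commit message, connection details are sent to the embedding process over channels; the consumer goroutine and the addresses here are made up.

package tap

import "log"

func exampleOutboundLinks() {
	olw := NewOutboundLinkWriter()

	go func() {
		for link := range olw.OutChan {
			log.Printf("outbound: %s -> %s:%d", link.Src, link.DstIP, link.DstPort)
		}
	}()

	olw.WriteOutboundLink("10.0.0.7", "93.184.216.34", 443) // blocks until the consumer receives
	olw.Stop()                                              // closes OutChan and ends the consumer loop
}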

tap/passive_tapper.go (new file, 499 lines)
@@ -0,0 +1,499 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.

// The pcapdump binary implements a tcpdump-like command line tool with gopacket
// using pcap as a backend data collection mechanism.
package tap

import (
	"encoding/hex"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/gopacket"
	"github.com/google/gopacket/examples/util"
	"github.com/google/gopacket/ip4defrag"
	"github.com/google/gopacket/layers" // pulls in all layers decoders
	"github.com/google/gopacket/pcap"
	"github.com/google/gopacket/reassembly"
)

const AppPortsEnvVar = "APP_PORTS"
const maxHTTP2DataLenEnvVar = "HTTP2_DATA_SIZE_LIMIT"

// default is 1MB, more than the max size accepted by collector and traffic-dumper
const maxHTTP2DataLenDefault = 1 * 1024 * 1024
const cleanPeriod = time.Second * 10

var remoteOnlyOutboundPorts = []int{80, 443}

func parseAppPorts(appPortsList string) []int {
	ports := make([]int, 0)
	for _, portStr := range strings.Split(appPortsList, ",") {
		parsedInt, parseError := strconv.Atoi(portStr)
		if parseError != nil {
			log.Printf("Provided app port %v is not a valid number!", portStr)
		} else {
			ports = append(ports, parsedInt)
		}
	}
	return ports
}

var maxcount = flag.Int("c", -1, "Only grab this many packets, then exit")
var decoder = flag.String("decoder", "", "Name of the decoder to use (default: guess from capture)")
var statsevery = flag.Int("stats", 60, "Output statistics every N seconds")
var lazy = flag.Bool("lazy", false, "If true, do lazy decoding")
var nodefrag = flag.Bool("nodefrag", false, "If true, do not do IPv4 defrag")
var checksum = flag.Bool("checksum", false, "Check TCP checksum")                                                      // global
var nooptcheck = flag.Bool("nooptcheck", true, "Do not check TCP options (useful to ignore MSS on captures with TSO)") // global
var ignorefsmerr = flag.Bool("ignorefsmerr", true, "Ignore TCP FSM errors")                                            // global
var allowmissinginit = flag.Bool("allowmissinginit", true, "Support streams without SYN/SYN+ACK/ACK sequence")         // global
var verbose = flag.Bool("verbose", false, "Be verbose")
var debug = flag.Bool("debug", false, "Display debug information")
var quiet = flag.Bool("quiet", false, "Be quiet regarding errors")

// http
var nohttp = flag.Bool("nohttp", false, "Disable HTTP parsing")
var output = flag.String("output", "", "Path to create file for HTTP 200 OK responses")
var writeincomplete = flag.Bool("writeincomplete", false, "Write incomplete response")

var hexdump = flag.Bool("dump", false, "Dump HTTP request/response as hex") // global
var hexdumppkt = flag.Bool("dumppkt", false, "Dump packet as hex")

// capture
var iface = flag.String("i", "en0", "Interface to read packets from")
var fname = flag.String("r", "", "Filename to read from, overrides -i")
var snaplen = flag.Int("s", 65536, "Snap length (max number of bytes to read per packet)")
var tstype = flag.String("timestamp_type", "", "Type of timestamps to use")
var promisc = flag.Bool("promisc", true, "Set promiscuous mode")
var anydirection = flag.Bool("anydirection", false, "Capture http requests to other hosts")
var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data")

var memprofile = flag.String("memprofile", "", "Write memory profile")

// output
var dumpToHar = flag.Bool("hardump", false, "Dump traffic to har files")
var HarOutputDir = flag.String("hardir", "", "Directory in which to store output har files")
var harEntriesPerFile = flag.Int("harentriesperfile", 200, "Max number of har entries to store in each file")

var reqResMatcher = createResponseRequestMatcher() // global
var statsTracker = StatsTracker{}

// global
var stats struct {
	ipdefrag            int
	missedBytes         int
	pkt                 int
	sz                  int
	totalsz             int
	rejectFsm           int
	rejectOpt           int
	rejectConnFsm       int
	reassembled         int
	outOfOrderBytes     int
	outOfOrderPackets   int
	biggestChunkBytes   int
	biggestChunkPackets int
	overlapBytes        int
	overlapPackets      int
}

type TapOpts struct {
	HostMode bool
}

var outputLevel int
var errorsMap map[string]uint
var errorsMapMutex sync.Mutex
var nErrors uint
var ownIps []string // global
var hostMode bool   // global

/* minOutputLevel: the error will be printed only if outputLevel is at or above this value
 * t: key for errorsMap (counting errors)
 * s, a: arguments for log.Printf
 * Note: Too bad for perf that a... is evaluated
 */
func logError(minOutputLevel int, t string, s string, a ...interface{}) {
	errorsMapMutex.Lock()
	nErrors++
	nb := errorsMap[t]
	errorsMap[t] = nb + 1
	errorsMapMutex.Unlock()
	if outputLevel >= minOutputLevel {
		formatStr := fmt.Sprintf("%s: %s", t, s)
		log.Printf(formatStr, a...)
	}
}
func Error(t string, s string, a ...interface{}) {
	logError(0, t, s, a...)
}
func SilentError(t string, s string, a ...interface{}) {
	logError(2, t, s, a...)
}
func Info(s string, a ...interface{}) {
	if outputLevel >= 1 {
		log.Printf(s, a...)
	}
}
func Debug(s string, a ...interface{}) {
	if outputLevel >= 2 {
		log.Printf(s, a...)
	}
}

func inArrayInt(arr []int, valueToCheck int) bool {
	for _, value := range arr {
		if value == valueToCheck {
			return true
		}
	}
	return false
}

func inArrayString(arr []string, valueToCheck string) bool {
	for _, value := range arr {
		if value == valueToCheck {
			return true
		}
	}
	return false
}

// Context
// The assembler context
type Context struct {
	CaptureInfo gopacket.CaptureInfo
}

func (c *Context) GetCaptureInfo() gopacket.CaptureInfo {
	return c.CaptureInfo
}

func StartPassiveTapper(opts *TapOpts) (<-chan *OutputChannelItem, <-chan *OutboundLink) {
	hostMode = opts.HostMode

	var harWriter *HarWriter
	if *dumpToHar {
		harWriter = NewHarWriter(*HarOutputDir, *harEntriesPerFile)
	}
	outboundLinkWriter := NewOutboundLinkWriter()

	go startPassiveTapper(harWriter, outboundLinkWriter)

	if harWriter != nil {
		return harWriter.OutChan, outboundLinkWriter.OutChan
	}

	return nil, outboundLinkWriter.OutChan
}

func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWriter) {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Lshortfile)

	defer util.Run()()
	if *debug {
		outputLevel = 2
	} else if *verbose {
		outputLevel = 1
	} else if *quiet {
		outputLevel = -1
	}
	errorsMap = make(map[string]uint)

	if localhostIPs, err := getLocalhostIPs(); err != nil {
		// TODO: think this over
		log.Println("Failed to get self IP addresses")
		Error("Getting-Self-Address", "Error getting self ip address: %s (%v,%+v)", err, err, err)
		ownIps = make([]string, 0)
	} else {
		ownIps = localhostIPs
	}

	appPortsStr := os.Getenv(AppPortsEnvVar)
	var appPorts []int
	if appPortsStr == "" {
		log.Println("Received empty/no APP_PORTS env var! only listening to http on port 80!")
		appPorts = make([]int, 0)
	} else {
		appPorts = parseAppPorts(appPortsStr)
	}
	SetFilterPorts(appPorts)
	envVal := os.Getenv(maxHTTP2DataLenEnvVar)
	if envVal == "" {
		log.Println("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
		maxHTTP2DataLen = maxHTTP2DataLenDefault
	} else {
		if convertedInt, err := strconv.Atoi(envVal); err != nil {
			log.Println("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
			maxHTTP2DataLen = maxHTTP2DataLenDefault
		} else {
			log.Println("Received HTTP2_DATA_SIZE_LIMIT env var:", convertedInt)
			maxHTTP2DataLen = convertedInt
		}
	}

	log.Printf("App Ports: %v", gSettings.filterPorts)

	var handle *pcap.Handle
	var err error
	if *fname != "" {
		if handle, err = pcap.OpenOffline(*fname); err != nil {
			log.Fatalf("PCAP OpenOffline error: %v", err)
		}
	} else {
		// This is a little complicated because we want to allow all possible options
		// for creating the packet capture handle... instead of all this you can
		// just call pcap.OpenLive if you want a simple handle.
		inactive, err := pcap.NewInactiveHandle(*iface)
		if err != nil {
			log.Fatalf("could not create: %v", err)
		}
		defer inactive.CleanUp()
		if err = inactive.SetSnapLen(*snaplen); err != nil {
			log.Fatalf("could not set snap length: %v", err)
		} else if err = inactive.SetPromisc(*promisc); err != nil {
			log.Fatalf("could not set promisc mode: %v", err)
		} else if err = inactive.SetTimeout(time.Second); err != nil {
			log.Fatalf("could not set timeout: %v", err)
		}
		if *tstype != "" {
			if t, err := pcap.TimestampSourceFromString(*tstype); err != nil {
				log.Fatalf("Supported timestamp types: %v", inactive.SupportedTimestamps())
			} else if err := inactive.SetTimestampSource(t); err != nil {
				log.Fatalf("Supported timestamp types: %v", inactive.SupportedTimestamps())
			}
		}
		if handle, err = inactive.Activate(); err != nil {
			log.Fatalf("PCAP Activate error: %v", err)
		}
		defer handle.Close()
	}
	if len(flag.Args()) > 0 {
		bpffilter := strings.Join(flag.Args(), " ")
		Info("Using BPF filter %q", bpffilter)
		if err = handle.SetBPFFilter(bpffilter); err != nil {
			log.Fatalf("BPF filter error: %v", err)
		}
	}

	if *dumpToHar {
		harWriter.Start()
		defer harWriter.Stop()
	}
	defer outboundLinkWriter.Stop()

	var dec gopacket.Decoder
	var ok bool
	decoderName := *decoder
	if decoderName == "" {
		decoderName = fmt.Sprintf("%s", handle.LinkType())
	}
	if dec, ok = gopacket.DecodersByLayerName[decoderName]; !ok {
		log.Fatalln("No decoder named", decoderName)
	}
	source := gopacket.NewPacketSource(handle, dec)
	source.Lazy = *lazy
	source.NoCopy = true
	Info("Starting to read packets")
	count := 0
	bytes := int64(0)
	start := time.Now()
	defragger := ip4defrag.NewIPv4Defragmenter()

	streamFactory := &tcpStreamFactory{
		doHTTP:             !*nohttp,
		harWriter:          harWriter,
		outbountLinkWriter: outboundLinkWriter,
	}
	streamPool := reassembly.NewStreamPool(streamFactory)
	assembler := reassembly.NewAssembler(streamPool)
	var assemblerMutex sync.Mutex

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, os.Interrupt)

	staleConnectionTimeout := time.Second * time.Duration(*staleTimeoutSeconds)
	cleaner := Cleaner{
		assembler:         assembler,
		assemblerMutex:    &assemblerMutex,
		matcher:           &reqResMatcher,
		cleanPeriod:       cleanPeriod,
		connectionTimeout: staleConnectionTimeout,
	}
	cleaner.start()

	go func() {
		statsPeriod := time.Second * time.Duration(*statsevery)
		ticker := time.NewTicker(statsPeriod)

		for {
			<-ticker.C

			// Since the start
			errorsMapMutex.Lock()
			errorMapLen := len(errorsMap)
			errorsSummary := fmt.Sprintf("%v", errorsMap)
			errorsMapMutex.Unlock()
			log.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v) - Errors Summary: %s",
				count,
				bytes,
				time.Since(start),
				nErrors,
				errorMapLen,
				errorsSummary,
			)

			// At this moment
			memStats := runtime.MemStats{}
			runtime.ReadMemStats(&memStats)
			log.Printf(
				"mem: %d, goroutines: %d, unmatched messages: %d",
				memStats.HeapAlloc,
				runtime.NumGoroutine(),
				reqResMatcher.openMessagesMap.Count(),
			)

			// Since the last print
			cleanStats := cleaner.dumpStats()
			appStats := statsTracker.dumpStats()
			log.Printf(
				"flushed connections %d, closed connections: %d, deleted messages: %d, matched messages: %d",
				cleanStats.flushed,
				cleanStats.closed,
				cleanStats.deleted,
				appStats.matchedMessages,
			)
		}
	}()

	for packet := range source.Packets() {
		count++
		Debug("PACKET #%d", count)
		data := packet.Data()
		bytes += int64(len(data))
		if *hexdumppkt {
			Debug("Packet content (%d/0x%x) - %s", len(data), len(data), hex.Dump(data))
		}

		// defrag the IPv4 packet if required
		if !*nodefrag {
			ip4Layer := packet.Layer(layers.LayerTypeIPv4)
			if ip4Layer == nil {
				continue
			}
			ip4 := ip4Layer.(*layers.IPv4)
			l := ip4.Length
			newip4, err := defragger.DefragIPv4(ip4)
			if err != nil {
				log.Fatalln("Error while de-fragmenting", err)
			} else if newip4 == nil {
				Debug("Fragment...")
				continue // packet fragment, we don't have whole packet yet.
			}
			if newip4.Length != l {
				stats.ipdefrag++
				Debug("Decoding re-assembled packet: %s", newip4.NextLayerType())
				pb, ok := packet.(gopacket.PacketBuilder)
				if !ok {
					log.Panic("Not a PacketBuilder")
				}
				nextDecoder := newip4.NextLayerType()
				_ = nextDecoder.Decode(newip4.Payload, pb)
			}
		}

		tcp := packet.Layer(layers.LayerTypeTCP)
		if tcp != nil {
			tcp := tcp.(*layers.TCP)
			if *checksum {
				err := tcp.SetNetworkLayerForChecksum(packet.NetworkLayer())
				if err != nil {
					log.Fatalf("Failed to set network layer for checksum: %s\n", err)
				}
			}
			c := Context{
				CaptureInfo: packet.Metadata().CaptureInfo,
			}
			stats.totalsz += len(tcp.Payload)
			// log.Println(packet.NetworkLayer().NetworkFlow().Src(), ":", tcp.SrcPort, " -> ", packet.NetworkLayer().NetworkFlow().Dst(), ":", tcp.DstPort)
			assemblerMutex.Lock()
			assembler.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, &c)
			assemblerMutex.Unlock()
		}

		done := *maxcount > 0 && count >= *maxcount
		if done {
			errorsMapMutex.Lock()
			errorMapLen := len(errorsMap)
			errorsMapMutex.Unlock()
			log.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)", count, bytes, time.Since(start), nErrors, errorMapLen)
		}
		select {
		case <-signalChan:
			log.Printf("Caught SIGINT: aborting")
			done = true
		default:
			// NOP: continue
		}
		if done {
			break
		}
	}

	assemblerMutex.Lock()
	closed := assembler.FlushAll()
	assemblerMutex.Unlock()
	Debug("Final flush: %d closed", closed)
	if outputLevel >= 2 {
		streamPool.Dump()
	}

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal(err)
		}
		_ = pprof.WriteHeapProfile(f)
		_ = f.Close()
	}

	streamFactory.WaitGoRoutines()
	assemblerMutex.Lock()
	Debug("%s", assembler.Dump())
	assemblerMutex.Unlock()
	if !*nodefrag {
		log.Printf("IPdefrag:\t\t%d", stats.ipdefrag)
	}
	log.Printf("TCP stats:")
	log.Printf(" missed bytes:\t\t%d", stats.missedBytes)
	log.Printf(" total packets:\t\t%d", stats.pkt)
	log.Printf(" rejected FSM:\t\t%d", stats.rejectFsm)
	log.Printf(" rejected Options:\t%d", stats.rejectOpt)
	log.Printf(" reassembled bytes:\t%d", stats.sz)
	log.Printf(" total TCP bytes:\t%d", stats.totalsz)
	log.Printf(" conn rejected FSM:\t%d", stats.rejectConnFsm)
	log.Printf(" reassembled chunks:\t%d", stats.reassembled)
	log.Printf(" out-of-order packets:\t%d", stats.outOfOrderPackets)
	log.Printf(" out-of-order bytes:\t%d", stats.outOfOrderBytes)
	log.Printf(" biggest-chunk packets:\t%d", stats.biggestChunkPackets)
	log.Printf(" biggest-chunk bytes:\t%d", stats.biggestChunkBytes)
	log.Printf(" overlap packets:\t%d", stats.overlapPackets)
	log.Printf(" overlap bytes:\t\t%d", stats.overlapBytes)
	log.Printf("Errors: %d", nErrors)
	for e := range errorsMap {
		log.Printf(" %s:\t\t%d", e, errorsMap[e])
	}
}
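
As an illustration (not part of the diff): how a host process, such as the Mizu api mentioned in the commit message, might embed the tapper. The import path comes from tap/go.mod; the environment value, port numbers and consuming logic are assumptions made for the example.

package main

import (
	"log"
	"os"

	"github.com/up9inc/mizu/tap"
)

func main() {
	os.Setenv("APP_PORTS", "8080,9090") // ports the tapper should treat as app ports (example values)

	harChannel, outboundChannel := tap.StartPassiveTapper(&tap.TapOpts{HostMode: true})

	go func() {
		for link := range outboundChannel {
			log.Printf("outbound link: %s -> %s:%d", link.Src, link.DstIP, link.DstPort)
		}
	}()

	// harChannel is non-nil only when the -hardump flag is set; entries reach it
	// (rather than .har files on disk) when no -hardir directory is given.
	if harChannel != nil {
		for item := range harChannel {
			log.Printf("har entry from %s:%s", item.ConnectionInfo.ClientIP, item.ConnectionInfo.ClientPort)
		}
	}
}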
31
tap/settings.go
Normal file
31
tap/settings.go
Normal file
@@ -0,0 +1,31 @@
package tap

type globalSettings struct {
    filterPorts       []int
    filterAuthorities []string
}

var gSettings = &globalSettings{
    filterPorts:       []int{},
    filterAuthorities: []string{},
}

func SetFilterPorts(ports []int) {
    gSettings.filterPorts = ports
}

func GetFilterPorts() []int {
    ports := make([]int, len(gSettings.filterPorts))
    copy(ports, gSettings.filterPorts)
    return ports
}

func SetFilterAuthorities(ipAddresses []string) {
    gSettings.filterAuthorities = ipAddresses
}

func GetFilterIPs() []string {
    addresses := make([]string, len(gSettings.filterAuthorities))
    copy(addresses, gSettings.filterAuthorities)
    return addresses
}
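SetFilterPorts and SetFilterAuthorities store the tapper's capture filters, while the getters hand back copies so callers cannot mutate the tapper's state through a returned slice. A usage sketch; the caller below is hypothetical, and the real configuration presumably happens in the mizu api, which imports this module:

package main

import (
    "fmt"

    "github.com/up9inc/mizu/tap"
)

func main() {
    // Configure which ports and authorities ("ip:port" or bare IP) should be tapped.
    tap.SetFilterPorts([]int{80, 8080})
    tap.SetFilterAuthorities([]string{"10.0.0.12:8080", "10.0.0.13"})

    // Getters return defensive copies, so mutating the result leaves the tapper untouched.
    ports := tap.GetFilterPorts()
    ports[0] = 9999
    fmt.Println(tap.GetFilterPorts()) // prints [80 8080]
}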
35
tap/stats_tracker.go
Normal file
@@ -0,0 +1,35 @@
package tap

import (
    "sync"
)

type AppStats struct {
    matchedMessages int
}

type StatsTracker struct {
    stats      AppStats
    statsMutex sync.Mutex
}

func (st *StatsTracker) incMatchedMessages() {
    st.statsMutex.Lock()
    st.stats.matchedMessages++
    st.statsMutex.Unlock()
}

func (st *StatsTracker) dumpStats() AppStats {
    st.statsMutex.Lock()

    stats := AppStats{
        matchedMessages: st.stats.matchedMessages,
    }

    st.stats.matchedMessages = 0

    st.statsMutex.Unlock()

    return stats
}
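incMatchedMessages and dumpStats protect a single counter with a mutex, and dumpStats resets it, so each call reports only the activity since the previous dump. A hypothetical in-package reporter built on top of it (not part of the commit):

package tap

import (
    "log"
    "time"
)

// reportMatchedMessages is a hypothetical helper: every interval it dumps (and
// thereby resets) the tracker's counters and logs how many request/response
// pairs were matched since the previous dump.
func reportMatchedMessages(statsTracker *StatsTracker, interval time.Duration) {
    ticker := time.NewTicker(interval)
    for range ticker.C {
        stats := statsTracker.dumpStats()
        log.Printf("Matched messages in the last %v: %d", interval, stats.matchedMessages)
    }
}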
168
tap/tcp_stream.go
Normal file
@@ -0,0 +1,168 @@
package tap

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
    "sync"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers" // pulls in all layers decoders
    "github.com/google/gopacket/reassembly"
)

/* It's a connection (bidirectional)
 * Implements gopacket.reassembly.Stream interface (Accept, ReassembledSG, ReassemblyComplete)
 * ReassembledSG gets called when new reassembled data is ready (i.e. bytes in order, no duplicates, complete)
 * In our implementation, we pass information from ReassembledSG to the httpReader through a shared channel.
 */
type tcpStream struct {
    tcpstate       *reassembly.TCPSimpleFSM
    fsmerr         bool
    optchecker     reassembly.TCPOptionCheck
    net, transport gopacket.Flow
    isDNS          bool
    isHTTP         bool
    reversed       bool
    client         httpReader
    server         httpReader
    urls           []string
    ident          string
    sync.Mutex
}

func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
    // FSM
    if !t.tcpstate.CheckState(tcp, dir) {
        SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpstate.String())
        stats.rejectFsm++
        if !t.fsmerr {
            t.fsmerr = true
            stats.rejectConnFsm++
        }
        if !*ignorefsmerr {
            return false
        }
    }
    // Options
    err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
    if err != nil {
        SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
        stats.rejectOpt++
        if !*nooptcheck {
            return false
        }
    }
    // Checksum
    accept := true
    if *checksum {
        c, err := tcp.ComputeChecksum()
        if err != nil {
            SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
            accept = false
        } else if c != 0x0 {
            SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
            accept = false
        }
    }
    if !accept {
        stats.rejectOpt++
    }
    return accept
}

func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.AssemblerContext) {
    dir, start, end, skip := sg.Info()
    length, saved := sg.Lengths()
    // update stats
    sgStats := sg.Stats()
    if skip > 0 {
        stats.missedBytes += skip
    }
    stats.sz += length - saved
    stats.pkt += sgStats.Packets
    if sgStats.Chunks > 1 {
        stats.reassembled++
    }
    stats.outOfOrderPackets += sgStats.QueuedPackets
    stats.outOfOrderBytes += sgStats.QueuedBytes
    if length > stats.biggestChunkBytes {
        stats.biggestChunkBytes = length
    }
    if sgStats.Packets > stats.biggestChunkPackets {
        stats.biggestChunkPackets = sgStats.Packets
    }
    if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
        // In the original example this was handled with panic().
        // I don't know what this error means or how to handle it properly.
        SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
    }
    stats.overlapBytes += sgStats.OverlapBytes
    stats.overlapPackets += sgStats.OverlapPackets

    var ident string
    if dir == reassembly.TCPDirClientToServer {
        ident = fmt.Sprintf("%v %v(%s): ", t.net, t.transport, dir)
    } else {
        ident = fmt.Sprintf("%v %v(%s): ", t.net.Reverse(), t.transport.Reverse(), dir)
    }
    Debug("%s: SG reassembled packet with %d bytes (start:%v,end:%v,skip:%d,saved:%d,nb:%d,%d,overlap:%d,%d)", ident, length, start, end, skip, saved, sgStats.Packets, sgStats.Chunks, sgStats.OverlapBytes, sgStats.OverlapPackets)
    if skip == -1 && *allowmissinginit {
        // this is allowed
    } else if skip != 0 {
        // Missing bytes in stream: do not even try to parse it
        return
    }
    data := sg.Fetch(length)
    if t.isDNS {
        dns := &layers.DNS{}
        var decoded []gopacket.LayerType
        if len(data) < 2 {
            if len(data) > 0 {
                sg.KeepFrom(0)
            }
            return
        }
        dnsSize := binary.BigEndian.Uint16(data[:2])
        missing := int(dnsSize) - len(data[2:])
        Debug("dnsSize: %d, missing: %d", dnsSize, missing)
        if missing > 0 {
            Info("Missing some bytes: %d", missing)
            sg.KeepFrom(0)
            return
        }
        p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
        err := p.DecodeLayers(data[2:], &decoded)
        if err != nil {
            SilentError("DNS-parser", "Failed to decode DNS: %v", err)
        } else {
            Debug("DNS: %s", gopacket.LayerDump(dns))
        }
        if len(data) > 2+int(dnsSize) {
            sg.KeepFrom(2 + int(dnsSize))
        }
    } else if t.isHTTP {
        if length > 0 {
            if *hexdump {
                Debug("Feeding http with:%s", hex.Dump(data))
            }
            // This is where we pass the reassembled information onwards
            // This channel is read by an httpReader object
            if dir == reassembly.TCPDirClientToServer && !t.reversed {
                t.client.msgQueue <- httpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp}
            } else {
                t.server.msgQueue <- httpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp}
            }
        }
    }
}

func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
    Debug("%s: Connection closed", t.ident)
    if t.isHTTP {
        close(t.client.msgQueue)
        close(t.server.msgQueue)
    }
    // do not remove the connection to allow last ACK
    return false
}
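ReassembledSG pushes each reassembled chunk into the client or server msgQueue and ReassemblyComplete closes both queues, so a consumer can simply range over its channel until it is closed. A simplified, hypothetical consumer in the spirit of httpReader.run, which is defined elsewhere in the package and parses the chunks as HTTP instead of merely counting them:

package tap

import "sync"

// drainMsgQueue is a hypothetical, simplified stand-in for httpReader.run: it
// consumes the chunks that ReassembledSG sends and returns once
// ReassemblyComplete closes the channel.
func drainMsgQueue(msgQueue <-chan httpReaderDataMsg, wg *sync.WaitGroup) {
    defer wg.Done()
    chunks := 0
    for range msgQueue { // ends when the channel is closed
        chunks++
    }
    Debug("stream finished after %d reassembled chunks", chunks)
}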
118
tap/tcp_stream_factory.go
Normal file
@@ -0,0 +1,118 @@
package tap

import (
    "fmt"
    "sync"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers" // pulls in all layers decoders
    "github.com/google/gopacket/reassembly"
)

/*
 * The TCP factory: returns a new Stream
 * Implements gopacket.reassembly.StreamFactory interface (New)
 * Generates a new tcp stream for each new tcp connection. Closes the stream when the connection closes.
 */
type tcpStreamFactory struct {
    wg                 sync.WaitGroup
    doHTTP             bool
    harWriter          *HarWriter
    outbountLinkWriter *OutboundLinkWriter
}

func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
    Debug("* NEW: %s %s", net, transport)
    fsmOptions := reassembly.TCPSimpleFSMOptions{
        SupportMissingEstablishment: *allowmissinginit,
    }
    Debug("Current App Ports: %v", gSettings.filterPorts)
    dstIp := net.Dst().String()
    dstPort := int(tcp.DstPort)

    if factory.shouldNotifyOnOutboundLink(dstIp, dstPort) {
        factory.outbountLinkWriter.WriteOutboundLink(net.Src().String(), dstIp, dstPort)
    }
    isHTTP := factory.shouldTap(dstIp, dstPort)
    stream := &tcpStream{
        net:        net,
        transport:  transport,
        isDNS:      tcp.SrcPort == 53 || tcp.DstPort == 53,
        isHTTP:     isHTTP && factory.doHTTP,
        reversed:   tcp.SrcPort == 80,
        tcpstate:   reassembly.NewTCPSimpleFSM(fsmOptions),
        ident:      fmt.Sprintf("%s:%s", net, transport),
        optchecker: reassembly.NewTCPOptionCheck(),
    }
    if stream.isHTTP {
        stream.client = httpReader{
            msgQueue: make(chan httpReaderDataMsg),
            ident:    fmt.Sprintf("%s %s", net, transport),
            tcpID: tcpID{
                srcIP:   net.Src().String(),
                dstIP:   net.Dst().String(),
                srcPort: transport.Src().String(),
                dstPort: transport.Dst().String(),
            },
            hexdump:   *hexdump,
            parent:    stream,
            isClient:  true,
            harWriter: factory.harWriter,
        }
        stream.server = httpReader{
            msgQueue: make(chan httpReaderDataMsg),
            ident:    fmt.Sprintf("%s %s", net.Reverse(), transport.Reverse()),
            tcpID: tcpID{
                srcIP:   net.Dst().String(),
                dstIP:   net.Src().String(),
                srcPort: transport.Dst().String(),
                dstPort: transport.Src().String(),
            },
            hexdump:   *hexdump,
            parent:    stream,
            harWriter: factory.harWriter,
        }
        factory.wg.Add(2)
        // Start reading from channels stream.client.msgQueue and stream.server.msgQueue
        go stream.client.run(&factory.wg)
        go stream.server.run(&factory.wg)
    }
    return stream
}

func (factory *tcpStreamFactory) WaitGoRoutines() {
    factory.wg.Wait()
}

func (factory *tcpStreamFactory) shouldTap(dstIP string, dstPort int) bool {
    if hostMode {
        if inArrayString(gSettings.filterAuthorities, fmt.Sprintf("%s:%d", dstIP, dstPort)) {
            return true
        } else if inArrayString(gSettings.filterAuthorities, dstIP) {
            return true
        }
        return false
    } else {
        isTappedPort := dstPort == 80 || (gSettings.filterPorts != nil && (inArrayInt(gSettings.filterPorts, dstPort)))
        if !isTappedPort {
            return false
        }

        if !*anydirection {
            isDirectedHere := inArrayString(ownIps, dstIP)
            if !isDirectedHere {
                return false
            }
        }

        return true
    }
}

func (factory *tcpStreamFactory) shouldNotifyOnOutboundLink(dstIP string, dstPort int) bool {
    if inArrayInt(remoteOnlyOutboundPorts, dstPort) {
        isDirectedHere := inArrayString(ownIps, dstIP)
        return !isDirectedHere && !isPrivateIP(dstIP)
    }
    return true
}
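In host mode, shouldTap accepts a destination if either its "ip:port" authority or its bare IP appears in the configured filter authorities. An illustrative table-driven sketch of that behavior; it is not part of the commit and assumes hostMode is the package-level flag read by shouldTap:

package tap

import (
    "testing"
)

func TestShouldTapHostModeSketch(t *testing.T) {
    hostMode = true // assumption: hostMode is an assignable package-level variable
    SetFilterAuthorities([]string{"10.0.0.5", "10.0.0.6:8080"})

    factory := &tcpStreamFactory{}
    cases := []struct {
        ip   string
        port int
        want bool
    }{
        {"10.0.0.5", 80, true},    // bare IP listed, any port matches
        {"10.0.0.6", 8080, true},  // exact "ip:port" authority listed
        {"10.0.0.6", 9090, false}, // same IP, different port, no bare-IP entry
        {"10.0.0.7", 80, false},   // not listed at all
    }
    for _, c := range cases {
        if got := factory.shouldTap(c.ip, c.port); got != c.want {
            t.Errorf("shouldTap(%s, %d) = %v, want %v", c.ip, c.port, got, c.want)
        }
    }
}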