Refactor Mizu, define an extension API and add new protocols: AMQP, Kafka (#224)
* Separate HTTP related code into `extensions/http` as a Go plugin
* Move the `extensions` folder into the `tap` folder
* Move the HTTP files into `tap/extensions/lib` for now
* Replace `orcaman/concurrent-map` with `sync.Map`
* Remove `grpc_assembler.go`
* Remove `github.com/up9inc/mizu/tap/extensions/http/lib`
* Add a build script to automatically build extensions from a known path and load them
* Start to define the extension API (a sketch of the resulting API follows this list)
* Implement the `run()` function for the TCP stream
* Add support for defining multiple ports in the extension API
* Set the extension name inside the extension
* Declare the `Dissect` function in the extension API
* Dissect the HTTP request from inside the HTTP extension
* Distinguish between outbound and inbound ports
* Dissect the HTTP response from inside the HTTP extension
* Bring back the HTTP request-response pair matcher
* Return a `*api.RequestResponsePair` from the dissection
* Bring back the gRPC-HTTP/2 parser
* Fix the issues in `handleHTTP1ClientStream` and `handleHTTP1ServerStream`
* Call a function pointer to emit dissected data back to the `tap` package
* Roee's changes: trying to fix the agent to work with the "api" object (still not working)
* Fix a small mistake in the merge conflicts
* Fix the issues introduced by the merge conflict
* Add an `Emitter` interface to the API and send `OutputChannelItem`(s) to `OutputChannel`
* Fix the `HTTP1` handlers
* Set `ConnectionInfo` in the HTTP handlers
* Fix the `Dockerfile` to build the extensions
* Remove some unwanted code
* no message
* Re-enable the `getStreamProps` function
* Migrate back from `gopacket/tcpassembly` to `gopacket/reassembly`
* Introduce the `HTTPPayload` struct and the `HTTPPayloader` interface to `MarshalJSON()` all the data structures that are returned by the HTTP protocol
* Read `socketHarOutChannel` instead of `filteredHarChannel`
* Connect `OutputChannelItem` to the WebSocket, which means the web UI finally started to work again
* Add `.env.example` to the React app
* Marshal and unmarshal `*http.Request`, `*http.Response` pairs
* Move `loadExtensions` into `main.go` and map extensions into `extensionsMap`
* Add a `Summarize()` method to the `Dissector` interface
* Add an `Analyze` method to the `Dissector` interface and `MizuEntry` to the extension API
* Add a `Protocol` struct and make it affect the UI
* Refactor the `BaseEntryDetails` struct and display the source and destination ports in the UI
* Display the protocol name inside the details layout
* Add a `Represent` method to the `Dissector` interface and manipulate the UI through this method
* Make the protocol color affect the details layout color and write the protocol abbreviation vertically
* Remove everything HTTP-related from the `tap` package and make the extension system fully functional
* Fix the TypeScript warnings
* Bring the files related to AMQP into the `amqp` directory
* Add the `--nodefrag` flag to the tapper and bring in the main AMQP code
* Implement the AMQP `BasicPublish` and fix some issues in the UI when the response payload is missing
* Implement the `representBasicPublish` method
* Fix several minor issues
* Implement the AMQP `BasicDeliver`
* Implement the AMQP `QueueDeclare`
* Implement the AMQP `ExchangeDeclare`
* Implement the AMQP `ConnectionStart`
* Implement the AMQP `ConnectionClose`
* Implement the AMQP `QueueBind`
* Implement the AMQP `BasicConsume`
* Fix an issue in `ConnectionStart`
* Fix a linter error
* Bring the files related to Kafka into the `kafka` directory
* Fix the build errors in the Kafka Go files
* Implement the `Dissect` method of Kafka and adapt the request-response pair matcher to the asynchronous client-server stream
* Do the "Is reversed?" check inside `getStreamProps` and fix an issue in the Kafka `Dissect` method
* Implement the `Analyze` and `Summarize` methods of Kafka
* Implement the representations for Kafka `Metadata`, `RequestHeader` and `ResponseHeader`
* Refactor the AMQP and Kafka implementations to create the summary string only inside the `Analyze` method
* Implement the representations for Kafka `ApiVersions`
* Implement the representations for Kafka `Produce`
* Implement the representations for Kafka `Fetch`
* Implement the representations for Kafka `ListOffsets`, `CreateTopics` and `DeleteTopics`
* Fix the encoding of the AMQP `BasicPublish` and `BasicDeliver` body
* Remove the unnecessary logging
* Remove more logging
* Introduce a `Version` field to the `Protocol` struct for dynamically switching the HTTP protocol to HTTP/2
* Fix the issues in the analysis and representation of the HTTP/2 (gRPC) protocol
* Fix the issues in the summary section of the details layout for the HTTP/2 (gRPC) protocol
* Fix the read errors that freeze the sniffer in HTTP and Kafka
* Fix the issues in HTTP POST data
* Fix one more issue in HTTP POST data
* Fix an infinite loop in Kafka
* Fix another freezing issue in Kafka
* Revert "UI Infra - Support multiple entry types + refactoring (#211)". This reverts commit f74a52d4dc.
* Fix more issues introduced by the merge
* Fix the status code in the summary section
* Add the cleaner again (why did we remove it?) and add a TODO on the extension loop
* Fix the Dockerfile (remove the deletion of the .env file): it is listed in .dockerignore, and the build fails if the file does not exist
* Fix GetEntrties (the "/entries" endpoint) to work with "tapApi.BaseEntryDetail" (moved from shared)
* Fix an issue in the UI summary section
* Refactor the protocol payload structs
* Fix a log message in the passive tapper
* Adapt the `APP_PORTS` environment variable to the new extension system and change its format to `APP_PORTS='{"http": ["8001"]}'`
* Revert "fix dockerfile (remove deleting .env file) - it is found in dockerignore and fails to build if the file not exists". This reverts commit 4f514ae1f4.
* Bring in the necessary changes from f74a52d4dc
* Open the API server URL in the web browser as soon as Mizu is ready
* Make the TCP reader consist of a single Goroutine (instead of two) and try to dissect in both client and server mode by rewinding
* Swap `TcpID` without overwriting it
* Sort extensions by priority
* Try to dissect by looping through all the extensions
* Fix the `getStreamProps` function (it should be passed from the CLI as it was before)
* Turn the TCP reader back into two Goroutines (client and server)
* Fix a typo
* Learn `isClient` from the TCP stream
* Set the `viewer` style `overflow: "auto"`
* Fix the memory leaks in the AMQP and Kafka dissectors
* Revert some of the changes in be7c65eb6d
* Remove `allExtensionPorts` since it's no longer needed
* Remove `APP_PORTS` since it's no longer needed
* Fix all of the minor issues in the React code
* Check the Kafka header size and fail fast
* Break the dissector loop upon a successful dissection
* Don't break the dissector loop; protocols might collide
* Improve the HTTP request-response counter (still not perfect)
* Make the HTTP request-response counter perfect
* Revert "Revert some of the changes in be7c65eb6d3fb657a059707da3ca559937e59739". This reverts commit 08e7d786d8.
* Bring back the `filterItems` and `isHealthCheckByUserAgent` functions
* Remove some development artifacts
* Remove unused and commented lines that are no longer relevant
* Fix the performance of the TCP stream factory; make it create two `tcpReader`(s) per extension
* Change a log to debug level
* Make `*api.CounterPair` a field of `tcpReader`
* Set `isTapTarget` to always `true` again since the `filterAuthorities` implementation has problems
* Remove a variable that's only used for logging, even though it was not introduced by this branch
* Bring back the `NumberOfRules` field of the `ApplicableRules` struct
* Remove the unused `NewEntry` function
* Move the `k8sResolver == nil` check to a more appropriate place
* The default `healthChecksUserAgentHeaders` should be an empty array (like the default config value)
* Remove a spam `console.log`
* Fix the Rules button causing the app to crash (it accessed the service via an incorrect property)
* Ignore all `.env*` files in the Docker build
* Better caching in the Dockerfile: only copy `go.mod` before `go mod download`
* Check for errors while loading an extension
* Add a comment about why `Protocol` is not a pointer
* Bring back the call to `deleteOlderThan`
* Remove the `nil` check
* Reduce the maximum allowed AMQP message from 128 MB to 1 MB
* Fix an error that only occurs when a Kafka broker is initiating
* Revert the change in b2abd7b990
* Fix the service name resolution in all protocols
* Remove the `anydirection` flag and fix the issue in `filterAuthorities`
* Pass `sync.Map` by reference to the `deleteOlderThan` method
* Fix the packet capture issue in standalone mode that was introduced by the removal of `anydirection`
* Temporarily resolve the memory exhaustion in AMQP
* Fix a nil pointer dereference error
* Fix the CLI build error
* Fix a memory leak identified by `pprof`

Co-authored-by: Roee Gadot <roee.gadot@up9.com>
Co-authored-by: Nimrod Gilboa Markevich <nimrod@up9.com>
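
The bullet list above names the moving parts of the new extension API: each protocol lives in its own plugin that implements a dissector, and dissected traffic travels back to the `tap` package through an emitter. The sketch below is assembled only from the names that appear in the commit message (`Dissector`, `Emitter`, `OutputChannelItem`, `Protocol`, `TcpID`, `MizuEntry`); the method signatures and field sets are illustrative assumptions, not the actual definitions in `tap/api`.

package api

import "bufio"

// Protocol describes a dissector to the UI. The Version field is what lets
// the HTTP extension relabel a stream as HTTP/2 dynamically.
type Protocol struct {
	Name     string
	Version  string
	Ports    []string
	Priority int // extensions are sorted by priority and tried in order
}

// TcpID and MizuEntry are only referenced by the commit message; these stub
// shapes are placeholders for the sketch.
type TcpID struct{ SrcIP, DstIP, SrcPort, DstPort string }

type MizuEntry struct{ Summary string }

// OutputChannelItem carries one dissected request-response pair.
type OutputChannelItem struct {
	Protocol Protocol
	// dissected payloads, connection info, timestamps (omitted here)
}

// Emitter sends dissected items back to the tap package's output channel.
type Emitter interface {
	Emit(item *OutputChannelItem)
}

// Dissector is what a protocol extension implements; the method set comes
// from the commit message, the signatures are guesses.
type Dissector interface {
	Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, emitter Emitter) error
	Analyze(item *OutputChannelItem) *MizuEntry
	Summarize(entry *MizuEntry) string
	Represent(entry *MizuEntry) []byte
}
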
This commit is contained in:
tap/extensions/kafka/buffer.go (new file, 645 lines)
@@ -0,0 +1,645 @@
package main

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"sync"
	"sync/atomic"
)

// Bytes is an interface implemented by types that represent immutable
// sequences of bytes.
//
// Bytes values are used to abstract the location where record keys and
// values are read from (e.g. in-memory buffers, network sockets, files).
//
// The Close method should be called to release resources held by the object
// when the program is done with it.
//
// Bytes values are generally not safe to use concurrently from multiple
// goroutines.
type Bytes interface {
	io.ReadCloser
	// Returns the number of bytes remaining to be read from the payload.
	Len() int
}

// NewBytes constructs a Bytes value from b.
//
// The returned value references b, it does not make a copy of the backing
// array.
//
// If b is nil, nil is returned to represent a null BYTES value in the kafka
// protocol.
func NewBytes(b []byte) Bytes {
	if b == nil {
		return nil
	}
	r := new(bytesReader)
	r.Reset(b)
	return r
}
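
// Illustrative usage (not part of the original file): NewBytes wraps the
// slice without copying it, and Len reports what remains to be read.
//
//	b := NewBytes([]byte("hello"))
//	defer closeBytes(b)
//	buf := make([]byte, b.Len()) // len(buf) == 5
//	_, _ = io.ReadFull(b, buf)   // buf now holds "hello", b.Len() == 0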

// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
// length of b to minimize the memory footprint.
//
// The function returns a nil slice if b is nil.
// func ReadAll(b Bytes) ([]byte, error) {
// 	if b == nil {
// 		return nil, nil
// 	}
// 	s := make([]byte, b.Len())
// 	_, err := io.ReadFull(b, s)
// 	return s, err
// }

type bytesReader struct{ bytes.Reader }

func (*bytesReader) Close() error { return nil }

type refCount uintptr

func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }

func (rc *refCount) unref(onZero func()) {
	if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
		onZero()
	}
}
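
// Note on the decrement above: ^uintptr(0) is the all-ones bit pattern,
// i.e. -1 in two's complement, so AddUintptr atomically subtracts one from
// the counter; onZero runs exactly once, when the last reference is gone.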

const (
	// Size of the memory buffer for a single page. We use a fairly
	// large size here (64 KiB) because batches exchanged with kafka
	// tend to be multiple kilobytes in size, sometimes hundreds.
	// Using large pages amortizes the overhead of the page metadata
	// and algorithms to manage the pages.
	pageSize = 65536
)

type page struct {
	refc   refCount
	offset int64
	length int
	buffer *[pageSize]byte
}

func newPage(offset int64) *page {
	p, _ := pagePool.Get().(*page)
	if p != nil {
		p.offset = offset
		p.length = 0
		p.ref()
	} else {
		p = &page{
			refc:   1,
			offset: offset,
			buffer: &[pageSize]byte{},
		}
	}
	return p
}

func (p *page) ref() { p.refc.ref() }

func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }

func (p *page) slice(begin, end int64) []byte {
	i, j := begin-p.offset, end-p.offset

	if i < 0 {
		i = 0
	} else if i > pageSize {
		i = pageSize
	}

	if j < 0 {
		j = 0
	} else if j > pageSize {
		j = pageSize
	}

	if i < j {
		return p.buffer[i:j]
	}

	return nil
}
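
// Worked example (assumed values): for a full page whose offset is 65536,
// slice(65530, 65600) clamps the out-of-range begin to the page start and
// returns buffer[0:64]; a range that misses the page entirely ends up with
// i >= j and therefore yields nil.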

func (p *page) Cap() int { return pageSize }

func (p *page) Len() int { return p.length }

func (p *page) Size() int64 { return int64(p.length) }

func (p *page) Truncate(n int) {
	if n < p.length {
		p.length = n
	}
}

func (p *page) ReadAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	if off > int64(p.length) {
		return 0, nil
	}
	return copy(b, p.buffer[off:p.length]), nil
}

func (p *page) ReadFrom(r io.Reader) (int64, error) {
	n, err := io.ReadFull(r, p.buffer[p.length:])
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	p.length += n
	return int64(n), err
}

func (p *page) WriteAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	n := copy(p.buffer[off:], b)
	if end := int(off) + n; end > p.length {
		p.length = end
	}
	return n, nil
}

func (p *page) Write(b []byte) (int, error) {
	return p.WriteAt(b, p.offset+int64(p.length))
}

var (
	_ io.ReaderAt   = (*page)(nil)
	_ io.ReaderFrom = (*page)(nil)
	_ io.Writer     = (*page)(nil)
	_ io.WriterAt   = (*page)(nil)
)
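
// The blank assignments above are compile-time assertions that *page
// satisfies these standard io interfaces; they have no runtime cost.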

type pageBuffer struct {
	refc   refCount
	pages  contiguousPages
	length int
	cursor int
}

func newPageBuffer() *pageBuffer {
	b, _ := pageBufferPool.Get().(*pageBuffer)
	if b != nil {
		b.cursor = 0
		b.refc.ref()
	} else {
		b = &pageBuffer{
			refc:  1,
			pages: make(contiguousPages, 0, 16),
		}
	}
	return b
}

func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) {
	length := end - begin

	if length > math.MaxUint32 {
		panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB")
	}

	ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...)
	ref.pages.ref()
	ref.offset = begin
	ref.length = uint32(length)
}

func (pb *pageBuffer) ref(begin, end int64) *pageRef {
	ref := new(pageRef)
	pb.refTo(ref, begin, end)
	return ref
}

func (pb *pageBuffer) unref() {
	pb.refc.unref(func() {
		pb.pages.unref()
		pb.pages.clear()
		pb.pages = pb.pages[:0]
		pb.length = 0
		pageBufferPool.Put(pb)
	})
}

func (pb *pageBuffer) newPage() *page {
	return newPage(int64(pb.length))
}

func (pb *pageBuffer) Close() error {
	return nil
}

func (pb *pageBuffer) Len() int {
	return pb.length - pb.cursor
}

func (pb *pageBuffer) Size() int64 {
	return int64(pb.length)
}

func (pb *pageBuffer) Discard(n int) (int, error) {
	remain := pb.length - pb.cursor
	if remain < n {
		n = remain
	}
	pb.cursor += n
	return n, nil
}

func (pb *pageBuffer) Truncate(n int) {
	if n < pb.length {
		pb.length = n

		if n < pb.cursor {
			pb.cursor = n
		}

		for i := range pb.pages {
			if p := pb.pages[i]; p.length <= n {
				n -= p.length
			} else {
				if n > 0 {
					pb.pages[i].Truncate(n)
					i++
				}
				pb.pages[i:].unref()
				pb.pages[i:].clear()
				pb.pages = pb.pages[:i]
				break
			}
		}
	}
}

func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence)
	if err != nil {
		return -1, err
	}
	pb.cursor = int(c)
	return c, nil
}

func (pb *pageBuffer) ReadByte() (byte, error) {
	b := [1]byte{}
	_, err := pb.Read(b[:])
	return b[0], err
}

func (pb *pageBuffer) Read(b []byte) (int, error) {
	if pb.cursor >= pb.length {
		return 0, io.EOF
	}
	n, err := pb.ReadAt(b, int64(pb.cursor))
	pb.cursor += n
	return n, err
}

func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) {
	return pb.pages.ReadAt(b, off)
}

func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) {
	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	rn := int64(0)

	for {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if free == 0 {
			tail = pb.newPage()
			free = pageSize
			pb.pages = append(pb.pages, tail)
		}

		n, err := tail.ReadFrom(r)
		pb.length += int(n)
		rn += n
		if n < int64(free) {
			return rn, err
		}
	}
}
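
// ReadFrom keeps filling the tail page and appending fresh 64 KiB pages
// until the reader is drained: a short read (n < free) means io.ReadFull
// hit EOF on r. Illustrative effect (assumed input): a 100 KiB source
// leaves two pages, the first full and the second holding the last 36 KiB.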

func (pb *pageBuffer) WriteString(s string) (int, error) {
	return pb.Write([]byte(s))
}

func (pb *pageBuffer) Write(b []byte) (int, error) {
	wn := len(b)
	if wn == 0 {
		return 0, nil
	}

	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	for len(b) != 0 {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if len(b) <= free {
			tail.Write(b)
			pb.length += len(b)
			break
		}

		tail.Write(b[:free])
		b = b[free:]

		pb.length += free
		pb.pages = append(pb.pages, pb.newPage())
	}

	return wn, nil
}
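
// Illustrative behavior (assumed sizes): writing 70000 bytes into an empty
// pageBuffer fills the first 65536-byte page, appends a second page and
// spills the remaining 4464 bytes into it; Size() then reports 70000, and
// Write still returns the full 70000.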

func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) {
	n, err := pb.pages.WriteAt(b, off)
	if err != nil {
		return n, err
	}
	if n < len(b) {
		pb.Write(b[n:])
	}
	return len(b), nil
}

func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) {
	var wn int
	var err error
	pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += n
		return err == nil
	})
	pb.cursor += wn
	return int64(wn), err
}

var (
	_ io.ReaderAt     = (*pageBuffer)(nil)
	_ io.ReaderFrom   = (*pageBuffer)(nil)
	_ io.StringWriter = (*pageBuffer)(nil)
	_ io.Writer       = (*pageBuffer)(nil)
	_ io.WriterAt     = (*pageBuffer)(nil)
	_ io.WriterTo     = (*pageBuffer)(nil)

	pagePool       sync.Pool
	pageBufferPool sync.Pool
)

type contiguousPages []*page

func (pages contiguousPages) ref() {
	for _, p := range pages {
		p.ref()
	}
}

func (pages contiguousPages) unref() {
	for _, p := range pages {
		p.unref()
	}
}

func (pages contiguousPages) clear() {
	for i := range pages {
		pages[i] = nil
	}
}

func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) {
	rn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.ReadAt(b, off)
		b = b[n:]
		rn += n
		off += int64(n)
	}

	return rn, nil
}

func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) {
	wn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.WriteAt(b, off)
		b = b[n:]
		wn += n
		off += int64(n)
	}

	return wn, nil
}

func (pages contiguousPages) slice(begin, end int64) contiguousPages {
	i := pages.indexOf(begin)
	j := pages.indexOf(end)
	if j < len(pages) {
		j++
	}
	return pages[i:j]
}

func (pages contiguousPages) indexOf(offset int64) int {
	if len(pages) == 0 {
		return 0
	}
	return int((offset - pages[0].offset) / pageSize)
}
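
// indexOf relies on every page except the last being exactly pageSize long,
// so the containing page is found by integer division. For example (assumed
// layout): with pages[0].offset == 0, indexOf(131072) == 2, the third page.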

func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) {
	for _, p := range pages.slice(begin, end) {
		if !f(p.slice(begin, end)) {
			break
		}
	}
}

var (
	_ io.ReaderAt = contiguousPages{}
	_ io.WriterAt = contiguousPages{}
)

type pageRef struct {
	buffer [2]*page
	pages  contiguousPages
	offset int64
	cursor int64
	length uint32
	once   uint32
}

func (ref *pageRef) unref() {
	if atomic.CompareAndSwapUint32(&ref.once, 0, 1) {
		ref.pages.unref()
		ref.pages.clear()
		ref.pages = nil
		ref.offset = 0
		ref.cursor = 0
		ref.length = 0
	}
}

func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) }

func (ref *pageRef) Size() int64 { return int64(ref.length) }

func (ref *pageRef) Close() error { ref.unref(); return nil }

func (ref *pageRef) String() string {
	return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length)
}

func (ref *pageRef) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(ref.cursor, int64(ref.length), offset, whence)
	if err != nil {
		return -1, err
	}
	ref.cursor = c
	return c, nil
}

func (ref *pageRef) ReadByte() (byte, error) {
	var c byte
	var ok bool
	ref.scan(ref.cursor, func(b []byte) bool {
		c, ok = b[0], true
		return false
	})
	if ok {
		ref.cursor++
	} else {
		return 0, io.EOF
	}
	return c, nil
}

func (ref *pageRef) Read(b []byte) (int, error) {
	if ref.cursor >= int64(ref.length) {
		return 0, io.EOF
	}
	n, err := ref.ReadAt(b, ref.cursor)
	ref.cursor += int64(n)
	return n, err
}

func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) {
	limit := ref.offset + int64(ref.length)
	off += ref.offset

	if off >= limit {
		return 0, io.EOF
	}

	if off+int64(len(b)) > limit {
		b = b[:limit-off]
	}

	if len(b) == 0 {
		return 0, nil
	}

	n, err := ref.pages.ReadAt(b, off)
	if n == 0 && err == nil {
		err = io.EOF
	}
	return n, err
}

func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) {
	ref.scan(ref.cursor, func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += int64(n)
		return err == nil
	})
	ref.cursor += wn
	return
}

func (ref *pageRef) scan(off int64, f func([]byte) bool) {
	begin := ref.offset + off
	end := ref.offset + int64(ref.length)
	ref.pages.scan(begin, end, f)
}

var (
	_ io.Closer   = (*pageRef)(nil)
	_ io.Seeker   = (*pageRef)(nil)
	_ io.Reader   = (*pageRef)(nil)
	_ io.ReaderAt = (*pageRef)(nil)
	_ io.WriterTo = (*pageRef)(nil)
)

type pageRefAllocator struct {
	refs []pageRef
	head int
	size int
}

func (a *pageRefAllocator) newPageRef() *pageRef {
	if a.head == len(a.refs) {
		a.refs = make([]pageRef, a.size)
		a.head = 0
	}
	ref := &a.refs[a.head]
	a.head++
	return ref
}
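
// The allocator hands out pageRef values from a preallocated block of
// `size` entries and only allocates a new block once the current one is
// exhausted, amortizing allocation cost across many refs; abandoned blocks
// are reclaimed by the garbage collector after their refs are released.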

func unref(x interface{}) {
	if r, _ := x.(interface{ unref() }); r != nil {
		r.unref()
	}
}

func seek(cursor, limit, offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		// absolute offset
	case io.SeekCurrent:
		offset = cursor + offset
	case io.SeekEnd:
		offset = limit - offset
	default:
		return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
	}
	if offset < 0 {
		offset = 0
	}
	if offset > limit {
		offset = limit
	}
	return offset, nil
}
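
// Worked example (assumed values): with cursor=10 and limit=100,
// seek(10, 100, 5, io.SeekCurrent) returns 15 and
// seek(10, 100, 5, io.SeekEnd) returns 95; results outside [0, limit] are
// clamped rather than reported as errors.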

func closeBytes(b Bytes) {
	if b != nil {
		b.Close()
	}
}

func resetBytes(b Bytes) {
	if r, _ := b.(interface{ Reset() }); r != nil {
		r.Reset()
	}
}
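
// Illustrative round trip over the types above (a sketch, not part of the
// original file):
//
//	pb := newPageBuffer()
//	pb.WriteString("hello kafka") // fits in a single 64 KiB page
//	ref := pb.ref(0, pb.Size())   // immutable, reference-counted view
//	data, _ := io.ReadAll(ref)    // data == []byte("hello kafka")
//	ref.unref()                   // release the view's page references
//	pb.unref()                    // pages go back to pagePool for reuse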