Mirror of https://github.com/kubeshark/kubeshark.git
* modified Dockerfile to work for both amd64 (Intel) and arm64 (M1)
* added changelog
* Update `Dockerfile` to have `ARCH` build argument
* Remove `docs/CHANGES.md`
* Upgrade the Basenine version from `v0.3.0` to `v0.4.6`
* Update `publish.yml` to have `ARCH` build argument
* Switch `BasenineImageRepo` to Docker Hub
* Have separate build arguments for `ARCH` and `GOARCH`
* Upgrade the Basenine version from `v0.4.6` to `v0.4.10`
* Oops forgot to update the 10th duplicated shell script
* Fix the oopsie and reduce duplications
* Fix `Dockerfile`
* Fix the incompatibility issue between Go plugins and gold linker in Alpine inside `Dockerfile`
* Fix `asm: xxhash_amd64.s:120: when dynamic linking, R15 is clobbered by a global variable access` error
* Update `Dockerfile` to have cross-compilation on an AMD64 machine. Also revert changes in the shell scripts
* Delete `debug.Dockerfile`
* Create a custom base (`debian:buster-slim` based) image for the shipped image
* Replace `mertyildiran/debian-pcap` with `up9inc/debian-pcap`
* Upgrade Basenine version to `v0.4.12`
* Use `debian:stable-slim` as the base
* Fix the indentation in the `Dockerfile`
* Update `publish.yml`
* Enable `publish.yml` for `feature/multiarch_build` branch
* Tag correctly and set `ARCH` Docker argument
* Remove the lines that were forgotten in the shell scripts
* Add `MizuAgentImageRepo` constant and use it as default `AgentImage` value
* Bring back `Set up Cloud SDK` step to `Build the CLI and publish` job
* Build ARM64 CLI for Linux as well
* Revert "Enable `publish.yml` for `feature/multiarch_build` branch". This reverts commit d30be4c1f0.
* Revert Go 1.17 upgrade
* Remove `build_extensions_debug.sh` as well
* Make the `Dockerfile` compile the agent statically
* Statically link the protocol extensions
* Fix `Dockerfile`
* Bring back `-s -w` flags
* Verify the signatures of the downloads in `dockcross/linux-arm64-musl`
* Revert modifications in some shell scripts
* Make the `BUILDARCH` and `TARGETARCH` separation in the `Dockerfile`
* Separate cross-compilation builder image into a separate repo named `up9inc/linux-arm64-musl-go-libpcap`
* Fill the shell script and specify the tag for `dockcross/linux-arm64-musl`
* Remove the unnecessary dependencies from `builder-native-base`
* Improve the comments in the `Dockerfile`
* Upgrade Basenine version to `v0.4.13`
* Fix `Dockerfile`
* Revert "Revert "Enable `publish.yml` for `feature/multiarch_build` branch"". This reverts commit 303e466bdc.
* Revert "Revert "Revert "Enable `publish.yml` for `feature/multiarch_build` branch""". This reverts commit 0fe252bbdb.
* Remove `push-docker-debug` from the `Makefile`
* Rename `publish.yml` to `release.yml`

Co-authored-by: Alex Haiut <alex@up9.com>
92 lines · 2.4 KiB · Go
package kafka

import (
	"fmt"
)

// Error represents client-side protocol errors.
type Error string

func (e Error) Error() string { return string(e) }

// Errorf constructs an Error by formatting msg with args, in the manner of fmt.Sprintf.
func Errorf(msg string, args ...interface{}) Error {
	return Error(fmt.Sprintf(msg, args...))
}

const (
	// ErrNoTopic is returned when a request needs to be sent to a specific
	// topic, but the client did not find it in the cluster metadata.
	ErrNoTopic Error = "topic not found"

	// ErrNoPartition is returned when a request needs to be sent to a specific
	// partition, but the client did not find it in the cluster metadata.
	ErrNoPartition Error = "topic partition not found"

	// ErrNoLeader is returned when a request needs to be sent to a partition
	// leader, but the client could not determine what the leader was at this
	// time.
	ErrNoLeader Error = "topic partition has no leader"

	// ErrNoRecord is returned when attempting to write a message containing an
	// empty record set (which kafka forbids).
	//
	// We handle this case client-side because kafka will close the connection
	// that it received an empty produce request on, causing all concurrent
	// requests to be aborted.
	ErrNoRecord Error = "record set contains no records"

	// ErrNoReset is returned by ResetRecordReader when the record reader does
	// not support being reset.
	ErrNoReset Error = "record sequence does not support reset"
)
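
// Sketch (not part of the original file): because the sentinels above are plain
// Error values, a hypothetical produce path can enforce the client-side guard
// described for ErrNoRecord with a direct comparison before anything is sent:
//
//	if len(records) == 0 {
//		return ErrNoRecord // fail locally instead of letting the broker drop the connection
//	}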

// TopicError wraps an error with the name of the topic it relates to.
type TopicError struct {
	Topic string
	Err   error
}

func NewTopicError(topic string, err error) *TopicError {
	return &TopicError{Topic: topic, Err: err}
}

func NewErrNoTopic(topic string) *TopicError {
	return NewTopicError(topic, ErrNoTopic)
}

func (e *TopicError) Error() string {
	return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic)
}

func (e *TopicError) Unwrap() error {
	return e.Err
}

// TopicPartitionError wraps an error with the topic and partition it relates to.
type TopicPartitionError struct {
	Topic     string
	Partition int32
	Err       error
}

func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError {
	return &TopicPartitionError{
		Topic:     topic,
		Partition: partition,
		Err:       err,
	}
}

func NewErrNoPartition(topic string, partition int32) *TopicPartitionError {
	return NewTopicPartitionError(topic, partition, ErrNoPartition)
}

func NewErrNoLeader(topic string, partition int32) *TopicPartitionError {
	return NewTopicPartitionError(topic, partition, ErrNoLeader)
}

func (e *TopicPartitionError) Error() string {
	return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition)
}

func (e *TopicPartitionError) Unwrap() error {
	return e.Err
}
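
Because both wrapper types implement Unwrap, the sentinel values stay matchable through the standard errors package. The sketch below is a hypothetical extra file in the same package; exampleHandleLeaderLookup and its wrapped error are illustrative only, not part of the original source:

package kafka

import (
	"errors"
	"fmt"
)

// exampleHandleLeaderLookup shows how a caller might inspect the errors
// produced by the constructors above.
func exampleHandleLeaderLookup(topic string, partition int32) {
	// Pretend a metadata lookup failed and came back wrapped.
	err := NewErrNoLeader(topic, partition)

	// Unwrap makes the sentinel reachable through errors.Is.
	if errors.Is(err, ErrNoLeader) {
		fmt.Println("no leader yet, will retry:", err)
	}

	// errors.As recovers the wrapper itself, exposing Topic and Partition.
	var tpErr *TopicPartitionError
	if errors.As(err, &tpErr) {
		fmt.Printf("failed on %s[%d]: %v\n", tpErr.Topic, tpErr.Partition, tpErr.Err)
	}
}

Keeping the sentinels as a distinct string type means equality checks stay allocation-free, while the wrapper structs carry the topic and partition context for callers that need it.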