Renamed collector/aggregator to API server, and the api folder to agent (#133)

* Renamed aggregator -> apiServer.

* Formatted errors with container names.

* Renamed collector -> apiServer.

* Rephrased help messages.

* Moved api -> agent.

* Continued renaming api -> agent in Makefile and Dockerfiles.
Author: nimrod-up9
Committed by: GitHub
Date: 2021-07-22 17:17:17 +03:00
Parent: a2150b4a78
Commit: 803681a239
45 changed files with 75 additions and 76 deletions

View File

@@ -13,10 +13,10 @@ ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64
 RUN apk add libpcap-dev gcc g++ make
-# Move to api working directory (/api-build).
-WORKDIR /app/api-build
-COPY api/go.mod api/go.sum ./
+# Move to agent working directory (/agent-build).
+WORKDIR /app/agent-build
+COPY agent/go.mod agent/go.sum ./
 COPY shared/go.mod shared/go.mod ../shared/
 COPY tap/go.mod tap/go.mod ../tap/
 RUN go mod download
@@ -28,10 +28,10 @@ ARG GIT_BRANCH
 ARG BUILD_TIMESTAMP
 ARG SEM_VER
-# Copy and build api code
+# Copy and build agent code
 COPY shared ../shared
 COPY tap ../tap
-COPY api .
+COPY agent .
 RUN go build -ldflags="-s -w \
     -X 'mizuserver/pkg/version.GitCommitHash=${COMMIT_HASH}' \
     -X 'mizuserver/pkg/version.Branch=${GIT_BRANCH}' \
@@ -45,10 +45,10 @@ RUN apk add bash libpcap-dev tcpdump
 WORKDIR /app
 # Copy binary and config files from /build to root folder of scratch container.
-COPY --from=builder ["/app/api-build/mizuagent", "."]
+COPY --from=builder ["/app/agent-build/mizuagent", "."]
 COPY --from=site-build ["/app/ui-build/build", "site"]
-COPY api/start.sh .
+COPY agent/start.sh .
 # this script runs both apiserver and passivetapper and exits either if one of them exits, preventing a scenario where the container runs without one process
 ENTRYPOINT "/app/mizuagent"
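The start.sh script copied above is not part of this diff. Purely as a hypothetical sketch of the pattern its comment describes (run both processes and exit as soon as either one exits), it could look like the following; the flag values and address are placeholders, not the actual script contents:

```sh
#!/bin/bash
# Hypothetical sketch only -- not the real agent/start.sh.
# Start both processes in the background, then exit as soon as the first one
# exits, so the container never keeps running with only one of them alive.
./mizuagent --api-server &
./mizuagent --tap --api-server-address "ws://localhost:8899/wsTapper" &
wait -n   # bash 4.3+: returns when the first background job finishes
exit $?
```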

View File

@@ -8,7 +8,7 @@ SHELL=/bin/bash
 # HELP
 # This will output the help for each task
 # thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
-.PHONY: help ui api cli tap docker
+.PHONY: help ui agent cli tap docker
 help: ## This help.
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@@ -27,10 +27,10 @@ ui: ## build UI
 cli: # build CLI
 	@echo "building cli"; cd cli && $(MAKE) build
-api: ## build API server
-	@(echo "building API server .." )
-	@(cd api; go build -o build/apiserver main.go)
-	@ls -l api/build
+agent: ## build mizuagent server
+	@(echo "building mizu agent .." )
+	@(cd agent; go build -o build/mizuagent main.go)
+	@ls -l agent/build
 #tap: ## build tap binary
 #	@(cd tap; go build -o build/tap ./src)
@@ -55,13 +55,13 @@ push-cli:
 	gsutil setmeta -r -h "Cache-Control:public, max-age=30" gs://${BUCKET_PATH}/\*
-clean: clean-ui clean-api clean-cli clean-docker ## Clean all build artifacts
+clean: clean-ui clean-agent clean-cli clean-docker ## Clean all build artifacts
 clean-ui:
 	@(rm -rf ui/build ; echo "UI cleanup done" )
-clean-api:
-	@(rm -rf api/build ; echo "api cleanup done" )
+clean-agent:
+	@(rm -rf agent/build ; echo "agent cleanup done" )
 clean-cli:
 	@(cd cli; make clean ; echo "CLI cleanup done" )
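With the renamed targets, building and cleaning the agent from the repository root looks like this (a minimal usage sketch based on the Makefile above):

```sh
# Build the agent binary into agent/build/, then clean it up again.
make agent
ls -l agent/build/mizuagent
make clean-agent
```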

View File

@@ -1,5 +1,5 @@
-# mizu API server
-API server for MIZU
+# mizu agent
+Agent for MIZU (API server and tapper)
 Basic APIs:
 * /fetch - retrieve traffic data
 * /stats - retrieve statistics of collected data
@@ -14,7 +14,7 @@ Basic APIs:
 ### Connecting
 1. Start mizu using the cli with the debug image `mizu tap --mizu-image gcr.io/up9-docker-hub/mizu/debug:latest {tapped_pod_name}`
-2. Forward the debug port using `kubectl port-forward -n default mizu-collector 2345:2345`
+2. Forward the debug port using `kubectl port-forward -n default mizu-api-server 2345:2345`
 3. Run the run/debug configuration you've created earlier in Intellij.
 <small>Do note that dlv won't start the api until a debugger connects to it.</small>
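For reference, once the debug port from step 2 is forwarded, a debugger other than IntelliJ can attach with delve's remote client; a minimal sketch assuming the default port 2345:

```sh
# Forward the API server pod's delve port, then attach from the command line.
kubectl port-forward -n default mizu-api-server 2345:2345 &
dlv connect localhost:2345
```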

View File

@@ -22,16 +22,16 @@ import (
 )
 var shouldTap = flag.Bool("tap", false, "Run in tapper mode without API")
-var aggregator = flag.Bool("aggregator", false, "Run in aggregator mode with API")
+var apiServer = flag.Bool("api-server", false, "Run in API server mode with API")
 var standalone = flag.Bool("standalone", false, "Run in standalone tapper and API mode")
-var aggregatorAddress = flag.String("aggregator-address", "", "Address of mizu collector for tapping")
+var apiServerAddress = flag.String("api-server-address", "", "Address of mizu API server")
 func main() {
 	flag.Parse()
 	hostMode := os.Getenv(shared.HostModeEnvVar) == "1"
 	tapOpts := &tap.TapOpts{HostMode: hostMode}
-	if !*shouldTap && !*aggregator && !*standalone {
+	if !*shouldTap && !*apiServer && !*standalone {
 		panic("One of the flags --tap, --api or --standalone must be provided")
 	}
@@ -45,8 +45,8 @@ func main() {
 		hostApi(nil)
 	} else if *shouldTap {
-		if *aggregatorAddress == "" {
-			panic("Aggregator address must be provided with --aggregator-address when using --tap")
+		if *apiServerAddress == "" {
+			panic("API server address must be provided with --api-server-address when using --tap")
 		}
 		tapTargets := getTapTargets()
@@ -57,14 +57,14 @@ func main() {
 		harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts)
-		socketConnection, err := shared.ConnectToSocketServer(*aggregatorAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false)
+		socketConnection, err := shared.ConnectToSocketServer(*apiServerAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false)
 		if err != nil {
-			panic(fmt.Sprintf("Error connecting to socket server at %s %v", *aggregatorAddress, err))
+			panic(fmt.Sprintf("Error connecting to socket server at %s %v", *apiServerAddress, err))
 		}
 		go pipeChannelToSocket(socketConnection, harOutputChannel)
 		go api.StartReadingOutbound(outboundLinkOutputChannel)
-	} else if *aggregator {
+	} else if *apiServer {
 		socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
 		filteredHarChannel := make(chan *tap.OutputChannelItem)
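Putting the renamed flags together, the agent binary now selects its mode roughly as follows (illustrative invocations; the API server address and namespace are placeholders):

```sh
# API server mode (previously --aggregator).
./mizuagent --api-server

# Tapper mode: the API server address (previously --aggregator-address) is required.
./mizuagent --tap --api-server-address "ws://mizu-api-server.default.svc.cluster.local/wsTapper"

# Standalone mode: tapper and API server in a single process.
./mizuagent --standalone
```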

View File

@@ -15,7 +15,7 @@ import (
 )
 func RunMizuFetch(fetch *MizuFetchOptions) {
-	mizuProxiedUrl := kubernetes.GetMizuCollectorProxiedHostAndPath(fetch.MizuPort)
+	mizuProxiedUrl := kubernetes.GetMizuApiServerProxiedHostAndPath(fetch.MizuPort)
 	resp, err := http.Get(fmt.Sprintf("http://%s/api/har?from=%v&to=%v", mizuProxiedUrl, fetch.FromTimestamp, fetch.ToTimestamp))
 	if err != nil {
 		log.Fatal(err)

View File

@@ -99,7 +99,7 @@ func init() {
 	tapCmd.Flags().Uint16VarP(&mizuTapOptions.SleepIntervalSec, "upload-interval", "", 10, "Interval in seconds for uploading data to UP9")
 	tapCmd.Flags().BoolVarP(&mizuTapOptions.AllNamespaces, "all-namespaces", "A", false, "Tap all namespaces")
 	tapCmd.Flags().StringVarP(&mizuTapOptions.KubeConfigPath, "kube-config", "k", "", "Path to kube-config file")
-	tapCmd.Flags().StringVarP(&mizuTapOptions.MizuImage, "mizu-image", "", fmt.Sprintf("gcr.io/up9-docker-hub/mizu/%s:%s", mizu.Branch, mizu.SemVer), "Custom image for mizu collector")
+	tapCmd.Flags().StringVarP(&mizuTapOptions.MizuImage, "mizu-image", "", fmt.Sprintf("gcr.io/up9-docker-hub/mizu/%s:%s", mizu.Branch, mizu.SemVer), "Custom image for mizu API server")
 	tapCmd.Flags().StringArrayVarP(&mizuTapOptions.PlainTextFilterRegexes, "regex-masking", "r", nil, "List of regex expressions that are used to filter matching values from text/plain http bodies")
 	tapCmd.Flags().StringVarP(&direction, "direction", "", "in", "Record traffic that goes in this direction (relative to the tapped pod): in/any")
 	tapCmd.Flags().BoolVar(&mizuTapOptions.HideHealthChecks, "hide-healthchecks", false, "hides requests with kube-probe or prometheus user-agent headers")
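Only the help text changes here; an illustrative `mizu tap` invocation exercising these flags (the pod name, image tag, and regex are placeholders):

```sh
# Tap a pod across all namespaces with a custom agent image and a masking regex.
mizu tap my-pod -A \
    --mizu-image gcr.io/up9-docker-hub/mizu/develop:0.0.1 \
    --regex-masking "password=[^&]+" \
    --direction any
```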

View File

@@ -22,7 +22,7 @@ import (
 )
 var mizuServiceAccountExists bool
-var aggregatorService *core.Service
+var apiServerService *core.Service
 const (
 	updateTappersDelay = 5 * time.Second
@@ -100,7 +100,7 @@ func createMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 		return err
 	}
-	if err := createMizuAggregator(ctx, kubernetesProvider, tappingOptions, mizuApiFilteringOptions); err != nil {
+	if err := createMizuApiServer(ctx, kubernetesProvider, tappingOptions, mizuApiFilteringOptions); err != nil {
 		return err
 	}
@@ -120,7 +120,7 @@ func createMizuNamespace(ctx context.Context, kubernetesProvider *kubernetes.Pro
 	return err
 }
-func createMizuAggregator(ctx context.Context, kubernetesProvider *kubernetes.Provider, tappingOptions *MizuTapOptions, mizuApiFilteringOptions *shared.TrafficFilteringOptions) error {
+func createMizuApiServer(ctx context.Context, kubernetesProvider *kubernetes.Provider, tappingOptions *MizuTapOptions, mizuApiFilteringOptions *shared.TrafficFilteringOptions) error {
 	var err error
 	mizuServiceAccountExists = createRBACIfNecessary(ctx, kubernetesProvider)
@@ -130,15 +130,15 @@ func createMizuAggregator(ctx context.Context, kubernetesProvider *kubernetes.Pr
 	} else {
 		serviceAccountName = ""
 	}
-	_, err = kubernetesProvider.CreateMizuAggregatorPod(ctx, mizu.ResourcesNamespace, mizu.AggregatorPodName, tappingOptions.MizuImage, serviceAccountName, mizuApiFilteringOptions, tappingOptions.MaxEntriesDBSizeBytes)
+	_, err = kubernetesProvider.CreateMizuApiServerPod(ctx, mizu.ResourcesNamespace, mizu.ApiServerPodName, tappingOptions.MizuImage, serviceAccountName, mizuApiFilteringOptions, tappingOptions.MaxEntriesDBSizeBytes)
 	if err != nil {
-		fmt.Printf("Error creating mizu collector pod: %v\n", err)
+		fmt.Printf("Error creating mizu %s pod: %v\n", mizu.ApiServerPodName, err)
 		return err
 	}
-	aggregatorService, err = kubernetesProvider.CreateService(ctx, mizu.ResourcesNamespace, mizu.AggregatorPodName, mizu.AggregatorPodName)
+	apiServerService, err = kubernetesProvider.CreateService(ctx, mizu.ResourcesNamespace, mizu.ApiServerPodName, mizu.ApiServerPodName)
 	if err != nil {
-		fmt.Printf("Error creating mizu collector service: %v\n", err)
+		fmt.Printf("Error creating mizu %s service: %v\n", mizu.ApiServerPodName, err)
 		return err
 	}
@@ -178,7 +178,7 @@ func updateMizuTappers(ctx context.Context, kubernetesProvider *kubernetes.Provi
 		mizu.TapperDaemonSetName,
 		tappingOptions.MizuImage,
 		mizu.TapperPodName,
-		fmt.Sprintf("%s.%s.svc.cluster.local", aggregatorService.Name, aggregatorService.Namespace),
+		fmt.Sprintf("%s.%s.svc.cluster.local", apiServerService.Name, apiServerService.Namespace),
 		nodeToTappedPodIPMap,
 		serviceAccountName,
 		tappingOptions.TapOutgoing,
@@ -288,7 +288,7 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }
 func portForwardApiPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, tappingOptions *MizuTapOptions) {
-	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", mizu.AggregatorPodName))
+	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", mizu.ApiServerPodName))
 	added, modified, removed, errorChan := kubernetes.FilteredWatch(ctx, kubernetesProvider.GetPodWatcher(ctx, mizu.ResourcesNamespace), podExactRegex)
 	isPodReady := false
 	timeAfter := time.After(25 * time.Second)
@@ -300,20 +300,20 @@ func portForwardApiPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
 		case <-added:
 			continue
 		case <-removed:
-			fmt.Printf("%s removed\n", mizu.AggregatorPodName)
+			fmt.Printf("%s removed\n", mizu.ApiServerPodName)
 			cancel()
 			return
 		case modifiedPod := <-modified:
 			if modifiedPod.Status.Phase == "Running" && !isPodReady {
 				isPodReady = true
 				go func() {
-					err := kubernetes.StartProxy(kubernetesProvider, tappingOptions.GuiPort, mizu.ResourcesNamespace, mizu.AggregatorPodName)
+					err := kubernetes.StartProxy(kubernetesProvider, tappingOptions.GuiPort, mizu.ResourcesNamespace, mizu.ApiServerPodName)
 					if err != nil {
 						fmt.Printf("Error occured while running k8s proxy %v\n", err)
 						cancel()
 					}
 				}()
-				mizuProxiedUrl := kubernetes.GetMizuCollectorProxiedHostAndPath(tappingOptions.GuiPort)
+				mizuProxiedUrl := kubernetes.GetMizuApiServerProxiedHostAndPath(tappingOptions.GuiPort)
 				fmt.Printf("Mizu is available at http://%s\n", mizuProxiedUrl)
 				time.Sleep(time.Second * 5) // Waiting to be sure the proxy is ready
@@ -336,7 +336,7 @@ func portForwardApiPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
 		case <-timeAfter:
 			if !isPodReady {
-				fmt.Printf("error: %s pod was not ready in time", mizu.AggregatorPodName)
+				fmt.Printf("error: %s pod was not ready in time", mizu.ApiServerPodName)
 				cancel()
 			}
@@ -389,7 +389,7 @@ func waitForFinish(ctx context.Context, cancel context.CancelFunc) {
 }
 func syncApiStatus(ctx context.Context, cancel context.CancelFunc, tappingOptions *MizuTapOptions) {
-	controlSocketStr := fmt.Sprintf("ws://%s/ws", kubernetes.GetMizuCollectorProxiedHostAndPath(tappingOptions.GuiPort))
+	controlSocketStr := fmt.Sprintf("ws://%s/ws", kubernetes.GetMizuApiServerProxiedHostAndPath(tappingOptions.GuiPort))
 	controlSocket, err := mizu.CreateControlSocket(controlSocketStr)
 	if err != nil {
 		fmt.Printf("error establishing control socket connection %s\n", err)

View File

@@ -25,25 +25,25 @@ func runMizuView(mizuViewOptions *MizuViewOptions) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	exists, err := kubernetesProvider.DoesServicesExist(ctx, mizu.ResourcesNamespace, mizu.AggregatorPodName)
+	exists, err := kubernetesProvider.DoesServicesExist(ctx, mizu.ResourcesNamespace, mizu.ApiServerPodName)
 	if err != nil {
 		panic(err)
 	}
 	if !exists {
-		fmt.Printf("The %s service not found\n", mizu.AggregatorPodName)
+		fmt.Printf("The %s service not found\n", mizu.ApiServerPodName)
 		return
 	}
-	mizuProxiedUrl := kubernetes.GetMizuCollectorProxiedHostAndPath(mizuViewOptions.GuiPort)
+	mizuProxiedUrl := kubernetes.GetMizuApiServerProxiedHostAndPath(mizuViewOptions.GuiPort)
 	_, err = http.Get(fmt.Sprintf("http://%s/", mizuProxiedUrl))
 	if err == nil {
-		fmt.Printf("Found a running service %s and open port %d\n", mizu.AggregatorPodName, mizuViewOptions.GuiPort)
+		fmt.Printf("Found a running service %s and open port %d\n", mizu.ApiServerPodName, mizuViewOptions.GuiPort)
 		return
 	}
-	fmt.Printf("Found service %s, creating k8s proxy\n", mizu.AggregatorPodName)
-	fmt.Printf("Mizu is available at http://%s\n", kubernetes.GetMizuCollectorProxiedHostAndPath(mizuViewOptions.GuiPort))
-	err = kubernetes.StartProxy(kubernetesProvider, mizuViewOptions.GuiPort, mizu.ResourcesNamespace, mizu.AggregatorPodName)
+	fmt.Printf("Found service %s, creating k8s proxy\n", mizu.ApiServerPodName)
+	fmt.Printf("Mizu is available at http://%s\n", kubernetes.GetMizuApiServerProxiedHostAndPath(mizuViewOptions.GuiPort))
+	err = kubernetes.StartProxy(kubernetesProvider, mizuViewOptions.GuiPort, mizu.ResourcesNamespace, mizu.ApiServerPodName)
 	if err != nil {
 		fmt.Printf("Error occured while running k8s proxy %v\n", err)
 	}

View File

@@ -124,7 +124,7 @@ func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*co
 	return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
 }
-func (provider *Provider) CreateMizuAggregatorPod(ctx context.Context, namespace string, podName string, podImage string, serviceAccountName string, mizuApiFilteringOptions *shared.TrafficFilteringOptions, maxEntriesDBSizeBytes int64) (*core.Pod, error) {
+func (provider *Provider) CreateMizuApiServerPod(ctx context.Context, namespace string, podName string, podImage string, serviceAccountName string, mizuApiFilteringOptions *shared.TrafficFilteringOptions, maxEntriesDBSizeBytes int64) (*core.Pod, error) {
 	marshaledFilteringOptions, err := json.Marshal(mizuApiFilteringOptions)
 	if err != nil {
 		return nil, err
@@ -132,19 +132,19 @@ func (provider *Provider) CreateMizuAggregatorPod(ctx context.Context, namespace
 	cpuLimit, err := resource.ParseQuantity("750m")
 	if err != nil {
-		return nil, errors.New("invalid cpu limit for aggregator container")
+		return nil, errors.New(fmt.Sprintf("invalid cpu limit for %s container", podName))
 	}
 	memLimit, err := resource.ParseQuantity("512Mi")
 	if err != nil {
-		return nil, errors.New("invalid memory limit for aggregator container")
+		return nil, errors.New(fmt.Sprintf("invalid memory limit for %s container", podName))
 	}
 	cpuRequests, err := resource.ParseQuantity("50m")
 	if err != nil {
-		return nil, errors.New("invalid cpu request for aggregator container")
+		return nil, errors.New(fmt.Sprintf("invalid cpu request for %s container", podName))
 	}
 	memRequests, err := resource.ParseQuantity("50Mi")
 	if err != nil {
-		return nil, errors.New("invalid memory request for aggregator container")
+		return nil, errors.New(fmt.Sprintf("invalid memory request for %s container", podName))
 	}
 	pod := &core.Pod{
@@ -159,7 +159,7 @@ func (provider *Provider) CreateMizuAggregatorPod(ctx context.Context, namespace
 					Name:            podName,
 					Image:           podImage,
 					ImagePullPolicy: core.PullAlways,
-					Command:         []string{"./mizuagent", "--aggregator"},
+					Command:         []string{"./mizuagent", "--api-server"},
 					Env: []core.EnvVar{
 						{
 							Name: shared.HostModeEnvVar,
@@ -471,7 +471,7 @@ func (provider *Provider) CheckDaemonSetExists(ctx context.Context, namespace st
 	return false, nil
 }
-func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, aggregatorPodIp string, nodeToTappedPodIPMap map[string][]string, serviceAccountName string, tapOutgoing bool) error {
+func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodIPMap map[string][]string, serviceAccountName string, tapOutgoing bool) error {
 	if len(nodeToTappedPodIPMap) == 0 {
 		return fmt.Errorf("Daemon set %s must tap at least 1 pod", daemonSetName)
 	}
@@ -486,7 +486,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 		"-i", "any",
 		"--tap",
 		"--hardump",
-		"--aggregator-address", fmt.Sprintf("ws://%s/wsTapper", aggregatorPodIp),
+		"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp),
 	}
 	if tapOutgoing {
 		mizuCmd = append(mizuCmd, "--anydirection")
@@ -512,19 +512,19 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 	)
 	cpuLimit, err := resource.ParseQuantity("500m")
 	if err != nil {
-		return errors.New("invalid cpu limit for tapper container")
+		return errors.New(fmt.Sprintf("invalid cpu limit for %s container", tapperPodName))
 	}
 	memLimit, err := resource.ParseQuantity("1Gi")
 	if err != nil {
-		return errors.New("invalid memory limit for tapper container")
+		return errors.New(fmt.Sprintf("invalid memory limit for %s container", tapperPodName))
 	}
 	cpuRequests, err := resource.ParseQuantity("50m")
 	if err != nil {
-		return errors.New("invalid cpu request for tapper container")
+		return errors.New(fmt.Sprintf("invalid cpu request for %s container", tapperPodName))
 	}
 	memRequests, err := resource.ParseQuantity("50Mi")
 	if err != nil {
-		return errors.New("invalid memory request for tapper container")
+		return errors.New(fmt.Sprintf("invalid memory request for %s container", tapperPodName))
 	}
 	agentResourceLimits := core.ResourceList{
 		"cpu": cpuLimit,

View File

@@ -40,24 +40,24 @@ func StartProxy(kubernetesProvider *Provider, mizuPort uint16, mizuNamespace str
 	return server.Serve(l)
 }
-func getMizuCollectorProxiedHostAndPath(mizuNamespace string, mizuServiceName string) string {
+func getMizuApiServerProxiedHostAndPath(mizuNamespace string, mizuServiceName string) string {
 	return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy/", mizuNamespace, mizuServiceName, mizuServicePort)
 }
-func GetMizuCollectorProxiedHostAndPath(mizuPort uint16) string {
+func GetMizuApiServerProxiedHostAndPath(mizuPort uint16) string {
 	return fmt.Sprintf("localhost:%d/mizu", mizuPort)
 }
 func getRerouteHttpHandlerMizuAPI(proxyHandler http.Handler, mizuNamespace string, mizuServiceName string) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		r.URL.Path = strings.Replace(r.URL.Path, "/mizu/", getMizuCollectorProxiedHostAndPath(mizuNamespace, mizuServiceName), 1)
+		r.URL.Path = strings.Replace(r.URL.Path, "/mizu/", getMizuApiServerProxiedHostAndPath(mizuNamespace, mizuServiceName), 1)
 		proxyHandler.ServeHTTP(w, r)
 	})
 }
 func getRerouteHttpHandlerMizuStatic(proxyHandler http.Handler, mizuNamespace string, mizuServiceName string) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getMizuCollectorProxiedHostAndPath(mizuNamespace, mizuServiceName)), 1)
+		r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getMizuApiServerProxiedHostAndPath(mizuNamespace, mizuServiceName)), 1)
 		proxyHandler.ServeHTTP(w, r)
 	})
 }

View File

@@ -9,7 +9,7 @@ var (
 )
 const (
-	AggregatorPodName = "mizu-collector"
+	ApiServerPodName = "mizu-api-server"
 	ClusterRoleBindingName = "mizu-cluster-role-binding"
 	ClusterRoleName = "mizu-cluster-role"
 	K8sAllNamespaces = ""

View File

@@ -1,4 +1,4 @@
-# creates image in which mizu api is remotely debuggable using delve
+# creates image in which mizu agent is remotely debuggable using delve
 FROM node:14-slim AS site-build
 WORKDIR /app/ui-build
@@ -14,10 +14,10 @@ ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64
 RUN apk add libpcap-dev gcc g++ make
-# Move to api working directory (/api-build).
-WORKDIR /app/api-build
-COPY api/go.mod api/go.sum ./
+# Move to agent working directory (/agent-build).
+WORKDIR /app/agent-build
+COPY agent/go.mod agent/go.sum ./
 COPY shared/go.mod shared/go.mod ../shared/
 COPY tap/go.mod tap/go.mod ../tap/
@@ -25,10 +25,10 @@ RUN go mod download
 # cheap trick to make the build faster (As long as go.mod wasn't changes)
 RUN go list -f '{{.Path}}@{{.Version}}' -m all | sed 1d | grep -e 'go-cache' -e 'sqlite' | xargs go get
-# Copy and build api code
+# Copy and build agent code
 COPY shared ../shared
 COPY tap ../tap
-COPY api .
+COPY agent .
 RUN go build -gcflags="all=-N -l" -o mizuagent .
@@ -38,11 +38,11 @@ RUN apk add bash libpcap-dev tcpdump
 WORKDIR /app
 # Copy binary and config files from /build to root folder of scratch container.
-COPY --from=builder ["/app/api-build/mizuagent", "."]
+COPY --from=builder ["/app/agent-build/mizuagent", "."]
 COPY --from=site-build ["/app/ui-build/build", "site"]
 # install remote debugging tool
 RUN go get github.com/go-delve/delve/cmd/dlv
 ENTRYPOINT "/app/mizuagent"
-#CMD ["sh", "-c", "dlv --headless=true --listen=:2345 --log --api-version=2 --accept-multiclient exec ./mizuagent -- --aggregator"]
+#CMD ["sh", "-c", "dlv --headless=true --listen=:2345 --log --api-version=2 --accept-multiclient exec ./mizuagent -- --api-server"]

View File

@@ -33,8 +33,7 @@ import (
 const AppPortsEnvVar = "APP_PORTS"
 const maxHTTP2DataLenEnvVar = "HTTP2_DATA_SIZE_LIMIT"
-// default is 1MB, more than the max size accepted by collector and traffic-dumper
-const maxHTTP2DataLenDefault = 1 * 1024 * 1024
+const maxHTTP2DataLenDefault = 1 * 1024 * 1024 // 1MB
 const cleanPeriod = time.Second * 10
 var remoteOnlyOutboundPorts = []int { 80, 443 }