diff --git a/Dockerfile b/Dockerfile
index 19fdf3004..4bb6a4572 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@ FROM node:14-slim AS site-build
 
 WORKDIR /ui-build
 
-COPY ui ./
+COPY ui .
 
 RUN npm i
 RUN npm run build
@@ -15,6 +15,7 @@ RUN apk add libpcap-dev gcc g++ make
 
 # Move to tapper working directory (/tap-build).
 WORKDIR /tap-build
+
 COPY tap/go.mod tap/go.sum ./
 RUN go mod download
 # Copy and build tapper code
@@ -23,6 +24,7 @@ RUN go build -ldflags="-s -w" -o passivetapper .
 
 # Move to api working directory (/api-build).
 WORKDIR /api-build
+
 COPY api/go.mod api/go.sum ./
 RUN go mod download
 # Copy and build api code
@@ -31,14 +33,28 @@ RUN go build -ldflags="-s -w" -o apiserver .
 
 FROM alpine:3.13.5
 
-RUN apk add parallel libpcap-dev
-RUN apk add tcpdump
+RUN apk add parallel libpcap-dev tcpdump
+
 # Copy binary and config files from /build to root folder of scratch container.
 COPY --from=builder ["/api-build/apiserver", "/"]
 COPY --from=builder ["/tap-build/passivetapper", "/"]
 COPY --from=site-build ["/ui-build/build", "/site"]
 
 
-# parallel will exit if one of the executables exits, ensuring this container does not run without one of the processes
-ENV HOST_MODE="1"
-CMD parallel --halt now,done=1 ::: './apiserver' './passivetapper -i any -hardump -hardir /tmp/up9hars -harentriesperfile 10'
\ No newline at end of file
+FROM alpine:3.13.5
+
+RUN apk add bash libpcap-dev tcpdump
+
+WORKDIR /app
+
+# Copy binary and config files from /build to root folder of scratch container.
+COPY --from=builder ["/api-build/apiserver", "."]
+COPY --from=builder ["/tap-build/passivetapper", "."]
+COPY --from=site-build ["/ui-build/build", "site"]
+
+
+COPY api/scripts/multi-runner.sh ./
+
+# This script runs both apiserver and passivetapper and exits if either of them exits, preventing a scenario where the container keeps running with only one of the processes.
+CMD "./multi-runner.sh"
+
diff --git a/api/scripts/multi-runner.sh b/api/scripts/multi-runner.sh
new file mode 100755
index 000000000..fc2523514
--- /dev/null
+++ b/api/scripts/multi-runner.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# This script runs both executables and terminates everything as soon as one of them exits.
+./apiserver &
+./passivetapper -i eth0 &
+wait -n
+pkill -P $$
diff --git a/cli/cmd/root.go b/cli/cmd/root.go
index 055b9991b..ae192e4d6 100644
--- a/cli/cmd/root.go
+++ b/cli/cmd/root.go
@@ -36,6 +36,8 @@ func init() {
 	rootCmd.Flags().StringVarP(&config.Configuration.Namespace, "namespace", "n", "", "Namespace selector")
 	rootCmd.Flags().BoolVarP(&config.Configuration.AllNamespaces, "all-namespaces", "A", false, "Select all namespaces")
 	rootCmd.Flags().StringVarP(&config.Configuration.KubeConfigPath, "kubeconfig", "k", "", "Path to kubeconfig file")
+	rootCmd.Flags().StringVarP(&config.Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v1", "Custom image for mizu collector")
+	rootCmd.Flags().Uint16VarP(&config.Configuration.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
 }
 
 // Execute adds all child commands to the root command and sets flags appropriately.
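For context on the supervision pattern in multi-runner.sh: `wait -n` (available in bash 4.3+, hence the `apk add bash` above) blocks until the first background job exits, and `pkill -P $$` then signals every remaining child of the script's own PID, so the script (and the container) stops as soon as either process dies. A minimal sketch of the same pattern that can be tried locally; the `sleep` commands are hypothetical stand-ins for apiserver and passivetapper, not part of this change:

```bash
#!/bin/bash

# Two stand-in workers: one exits after 2 seconds, the other would run for 60.
sleep 2 &
sleep 60 &

wait -n      # returns as soon as the first background job (sleep 2) exits
pkill -P $$  # kills the surviving child (sleep 60), letting the script exit too
```

Running the sketch finishes in roughly 2 seconds rather than 60, which is exactly the "one dies, all die" behavior the Dockerfile comment describes.
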
diff --git a/cli/config/config.go b/cli/config/config.go
index fab01b5ef..3be92624a 100644
--- a/cli/config/config.go
+++ b/cli/config/config.go
@@ -8,6 +8,8 @@ type Options struct {
 	Namespace      string
 	AllNamespaces  bool
 	KubeConfigPath string
+	MizuImage      string
+	MizuPodPort    uint16
 }
 
 var Configuration = &Options{}
diff --git a/cli/mizu/mizuRunner.go b/cli/mizu/mizuRunner.go
index a50ce3cd1..f75028a8c 100644
--- a/cli/mizu/mizuRunner.go
+++ b/cli/mizu/mizuRunner.go
@@ -12,10 +12,6 @@ import (
 	"time"
 )
 
-var (
-	isPortForwarded = false
-)
-
 func Run(podRegex *regexp.Regexp) {
 	kubernetesProvider := kubernetes.NewProvider(config.Configuration.KubeConfigPath, config.Configuration.Namespace)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -58,9 +54,7 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }
 
 func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, podName string) {
-	podImage := "kennethreitz/httpbin:latest"
-
-	pod, err := kubernetesProvider.CreatePod(ctx, podName, podImage)
+	pod, err := kubernetesProvider.CreatePod(ctx, podName, config.Configuration.MizuImage)
 	if err != nil {
 		fmt.Printf("error creating pod %s", err)
 		cancel()
@@ -82,7 +76,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
 			if modifiedPod.Status.Phase == "Running" && !isPodReady {
 				isPodReady = true
 				var err error
-				portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, 80, cancel)
+				portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, config.Configuration.MizuPodPort, cancel)
 				if !config.Configuration.NoDashboard {
 					fmt.Printf("Dashboard is now available at http://localhost:%d\n", config.Configuration.DashboardPort)
 				}
@@ -92,7 +86,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
 				}
 			}
 
-		case <- time.After(10 * time.Second):
+		case <- time.After(25 * time.Second):
 			if !isPodReady {
 				fmt.Printf("error: %s pod was not ready in time", podName)
 				cancel()
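For context on how the two new flags surface to users: both default to the values registered in root.go, so existing invocations keep working unchanged. A hypothetical invocation overriding both; the binary name `mizu` and the positional pod regex are assumptions inferred from `rootCmd` and `podRegex` in this diff, not confirmed by it:

```bash
# Tap pods matching a regex, pulling the collector from a custom image
# and forwarding from a non-default collector port.
mizu --mizu-image gcr.io/up9-docker-hub/mizu/develop/v1 \
     --mizu-port 8899 \
     "front-end.*"
```

Note that `--mizu-port` must match the port the apiserver actually listens on inside the collector pod: the CLI only changes which pod port the local DashboardPort is forwarded to (the `NewPortForward` call above), not what the collector binds.
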