Merge branch 'develop' into feature/workin_first_and_ugly

This commit is contained in:
gadotroee 2021-04-29 08:17:40 +03:00 committed by GitHub
commit e2df769ebd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 36 additions and 15 deletions

View File

@ -2,7 +2,7 @@ FROM node:14-slim AS site-build
WORKDIR /ui-build
COPY ui ./
COPY ui .
RUN npm i
RUN npm run build
@ -15,6 +15,7 @@ RUN apk add libpcap-dev gcc g++ make
# Move to tapper working directory (/tap-build).
WORKDIR /tap-build
COPY tap/go.mod tap/go.sum ./
RUN go mod download
# Copy and build tapper code
@ -23,6 +24,7 @@ RUN go build -ldflags="-s -w" -o passivetapper .
# Move to api working directory (/api-build).
WORKDIR /api-build
COPY api/go.mod api/go.sum ./
RUN go mod download
# Copy and build api code
@ -31,14 +33,28 @@ RUN go build -ldflags="-s -w" -o apiserver .
FROM alpine:3.13.5
RUN apk add parallel libpcap-dev
RUN apk add tcpdump
RUN apk add parallel libpcap-dev tcpdump
# Copy binary and config files from /build to root folder of scratch container.
COPY --from=builder ["/api-build/apiserver", "/"]
COPY --from=builder ["/tap-build/passivetapper", "/"]
COPY --from=site-build ["/ui-build/build", "/site"]
# parallel will exit if one of the executables exits, ensuring this container does not run without one of the processes
ENV HOST_MODE="1"
CMD parallel --halt now,done=1 ::: './apiserver' './passivetapper -i any -hardump -hardir /tmp/up9hars -harentriesperfile 10'
FROM alpine:3.13.5
RUN apk add bash libpcap-dev tcpdump
WORKDIR /app
# Copy binary and config files from /build to root folder of scratch container.
COPY --from=builder ["/api-build/apiserver", "."]
COPY --from=builder ["/tap-build/passivetapper", "."]
COPY --from=site-build ["/ui-build/build", "site"]
COPY api/scripts/multi-runner.sh ./
# this script runs both apiserver and passivetapper and exits if either of them exits, preventing a scenario where the container keeps running with only one of the processes
CMD "./multi-runner.sh"

7
api/scripts/multi-runner.sh Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
# Runs apiserver and passivetapper side by side. When either process
# exits, the other is killed and this script exits with the status of
# the process that finished first — so the container's exit code
# reflects the real failure instead of pkill's status.
./apiserver &
./passivetapper -i eth0 &
# wait -n (bash >= 4.3) returns as soon as the FIRST background job
# finishes, carrying that job's exit status.
wait -n
status=$?
# Kill any remaining child process of this shell ($$).
pkill -P $$
exit "$status"

View File

@ -36,6 +36,8 @@ func init() {
rootCmd.Flags().StringVarP(&config.Configuration.Namespace, "namespace", "n", "", "Namespace selector")
rootCmd.Flags().BoolVarP(&config.Configuration.AllNamespaces, "all-namespaces", "A", false, "Select all namespaces")
rootCmd.Flags().StringVarP(&config.Configuration.KubeConfigPath, "kubeconfig", "k", "", "Path to kubeconfig file")
rootCmd.Flags().StringVarP(&config.Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v1", "Custom image for mizu collector")
rootCmd.Flags().Uint16VarP(&config.Configuration.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
}
// Execute adds all child commands to the root command and sets flags appropriately.

View File

@ -8,6 +8,8 @@ type Options struct {
Namespace string
AllNamespaces bool
KubeConfigPath string
MizuImage string
MizuPodPort uint16
}
var Configuration = &Options{}

View File

@ -12,10 +12,6 @@ import (
"time"
)
var (
isPortForwarded = false
)
func Run(podRegex *regexp.Regexp) {
kubernetesProvider := kubernetes.NewProvider(config.Configuration.KubeConfigPath, config.Configuration.Namespace)
ctx, cancel := context.WithCancel(context.Background())
@ -58,9 +54,7 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
}
func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, podName string) {
podImage := "kennethreitz/httpbin:latest"
pod, err := kubernetesProvider.CreatePod(ctx, podName, podImage)
pod, err := kubernetesProvider.CreatePod(ctx, podName, config.Configuration.MizuImage)
if err != nil {
fmt.Printf("error creating pod %s", err)
cancel()
@ -82,7 +76,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
if modifiedPod.Status.Phase == "Running" && !isPodReady {
isPodReady = true
var err error
portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, 80, cancel)
portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, config.Configuration.MizuPodPort, cancel)
if !config.Configuration.NoDashboard {
fmt.Printf("Dashboard is now available at http://localhost:%d\n", config.Configuration.DashboardPort)
}
@ -92,7 +86,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
}
}
case <- time.After(10 * time.Second):
case <- time.After(25 * time.Second):
if !isPodReady {
fmt.Printf("error: %s pod was not ready in time", podName)
cancel()