Update Dockerfile, multi-runner.sh, and 31 more files...

RamiBerm
2021-04-28 17:32:06 +03:00
parent 7d8655feab
commit 9531f7a14b
19 changed files with 38 additions and 24 deletions


@@ -1,26 +1,37 @@
 FROM golang:1.16-alpine AS builder
-# Set necessary environment variables needed for our image.
-ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64
-# Move to working directory (/build).
-WORKDIR /build
-# Copy and download dependency using go mod.
+RUN apk add libpcap-dev gcc g++ make
+# Move to tapper working directory (/tap-build).
+WORKDIR /tap-build
+COPY tap/go.mod tap/go.sum ./
+RUN go mod download
+# Copy and build tapper code
+COPY tap/src ./
+RUN go build -ldflags="-s -w" -o passivetapper .
+# Move to api working directory (/api-build).
+WORKDIR ../api-build
 COPY go.mod go.sum ./
 RUN go mod download
-# Copy the code into the container.
+# Copy and build api code
 COPY . .
+# Set necessary environment variables needed for our image and build the API server.
+ENV CGO_ENABLED=0 GOOS=linux GOARCH=amd64
 RUN go build -ldflags="-s -w" -o apiserver .
-FROM scratch
+FROM alpine:3.13.5
+RUN apk add bash libpcap-dev
+WORKDIR /app
 # Copy binary and config files from /build to root folder of scratch container.
-COPY --from=builder ["/build/apiserver", "/"]
-# Export necessary port.
-EXPOSE 5000
-# Command to run when starting the container.
-ENTRYPOINT ["/apiserver"]
+COPY --from=builder ["/api-build/apiserver", "."]
+COPY --from=builder ["/tap-build/passivetapper", "."]
+COPY scripts/multi-runner.sh ./
+# this script runs both apiserver and passivetapper and exits if either of them exits, preventing a scenario where the container runs with only one process
+CMD "./multi-runner.sh"
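As a local smoke test, something like the following should exercise the new two-binary image. The image tag and run flags are assumptions, not part of the commit; packet capture normally needs extra capabilities, and the published port mirrors the mizu-port default of 8899 registered further down.

docker build -t mizu-collector:dev .
docker run --rm --cap-add NET_RAW --cap-add NET_ADMIN -p 8899:8899 mizu-collector:dev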

api/scripts/multi-runner.sh (new executable file, +5 lines)

@@ -0,0 +1,5 @@
+#!/bin/bash
+./apiserver &
+./passivetapper -i eth0 &
+wait -n
+pkill -P $$
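The script leans on two bash features: wait -n returns as soon as any one background job exits, and pkill -P $$ then kills whichever child is still alive. For reference, a minimal sketch of the same supervision pattern in the project's own language, Go; the binary paths and the eth0 interface are taken from the script, everything else is illustrative:

package main

import (
	"log"
	"os/exec"
)

func main() {
	// Start both long-running processes, as multi-runner.sh does.
	apiserver := exec.Command("./apiserver")
	tapper := exec.Command("./passivetapper", "-i", "eth0")

	done := make(chan error, 2)
	for _, cmd := range []*exec.Cmd{apiserver, tapper} {
		cmd := cmd // capture the loop variable for the goroutine
		if err := cmd.Start(); err != nil {
			log.Fatalf("failed to start %s: %v", cmd.Path, err)
		}
		go func() { done <- cmd.Wait() }()
	}

	// Equivalent of `wait -n`: block until either process exits,
	// then, like `pkill -P $$`, take the survivor down as well.
	err := <-done
	apiserver.Process.Kill()
	tapper.Process.Kill()
	log.Fatalf("a child process exited: %v", err)
}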


@@ -36,6 +36,8 @@ func init() {
 	rootCmd.Flags().StringVarP(&config.Configuration.Namespace, "namespace", "n", "", "Namespace selector")
 	rootCmd.Flags().BoolVarP(&config.Configuration.AllNamespaces, "all-namespaces", "A", false, "Select all namespaces")
 	rootCmd.Flags().StringVarP(&config.Configuration.KubeConfigPath, "kubeconfig", "k", "", "Path to kubeconfig file")
+	rootCmd.Flags().StringVarP(&config.Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v1", "Custom image for mizu collector")
+	rootCmd.Flags().Uint16VarP(&config.Configuration.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
 }

 // Execute adds all child commands to the root command and sets flags appropriately.


@@ -8,6 +8,8 @@ type Options struct {
 	Namespace      string
 	AllNamespaces  bool
 	KubeConfigPath string
+	MizuImage      string
+	MizuPodPort    uint16
 }

 var Configuration = &Options{}
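Taken together, the two hunks above register new cobra flags and bind them straight into the shared Options struct. A condensed, runnable sketch of that flow, with the package layout collapsed into one file and only the new fields kept; the binary name mizu is an assumption:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Options mirrors the struct from the diff, reduced to the new fields.
type Options struct {
	MizuImage   string
	MizuPodPort uint16
}

var Configuration = &Options{}

func main() {
	rootCmd := &cobra.Command{
		Use: "mizu",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("collector image=%s, pod port=%d\n",
				Configuration.MizuImage, Configuration.MizuPodPort)
		},
	}
	// Same registrations as in the diff; the empty shorthand means long-form only.
	rootCmd.Flags().StringVarP(&Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v1", "Custom image for mizu collector")
	rootCmd.Flags().Uint16VarP(&Configuration.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
	_ = rootCmd.Execute()
}

Running it with --mizu-image or --mizu-port overrides the defaults; with no flags the gcr.io development image and port 8899 are used.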


@@ -12,10 +12,6 @@ import (
"time" "time"
) )
var (
isPortForwarded = false
)
func Run(podRegex *regexp.Regexp) { func Run(podRegex *regexp.Regexp) {
kubernetesProvider := kubernetes.NewProvider(config.Configuration.KubeConfigPath, config.Configuration.Namespace) kubernetesProvider := kubernetes.NewProvider(config.Configuration.KubeConfigPath, config.Configuration.Namespace)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -58,9 +54,7 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }

 func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, podName string) {
-	podImage := "kennethreitz/httpbin:latest"
-	pod, err := kubernetesProvider.CreatePod(ctx, podName, podImage)
+	pod, err := kubernetesProvider.CreatePod(ctx, podName, config.Configuration.MizuImage)
 	if err != nil {
 		fmt.Printf("error creating pod %s", err)
 		cancel()
@@ -82,7 +76,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
 			if modifiedPod.Status.Phase == "Running" && !isPodReady {
 				isPodReady = true
 				var err error
-				portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, 80, cancel)
+				portForward, err = kubernetes.NewPortForward(kubernetesProvider, kubernetesProvider.Namespace, podName, config.Configuration.DashboardPort, config.Configuration.MizuPodPort, cancel)
 				if !config.Configuration.NoDashboard {
 					fmt.Printf("Dashboard is now available at http://localhost:%d\n", config.Configuration.DashboardPort)
 				}
@@ -92,7 +86,7 @@ func createPodAndPortForward(ctx context.Context, kubernetesProvider *kubernetes
 				}
 			}
-		case <- time.After(10 * time.Second):
+		case <- time.After(25 * time.Second):
 			if !isPodReady {
 				fmt.Printf("error: %s pod was not ready in time", podName)
 				cancel()
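The surrounding watch loop, only partly visible in these hunks, comes down to a select between pod-modification events and a deadline, which this commit raises from 10 to 25 seconds. A stripped-down, runnable sketch of that pattern, with the Kubernetes watch replaced by a stub channel; the 2-second sleep merely simulates the pod becoming ready:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for the pod-watch channel; the real code receives
	// modified-pod events and checks Status.Phase == "Running".
	podReady := make(chan struct{})
	go func() {
		time.Sleep(2 * time.Second) // simulate the collector pod starting
		close(podReady)
	}()

	select {
	case <-podReady:
		// Here the real code starts the port-forward from the local
		// DashboardPort to the pod's MizuPodPort.
		fmt.Println("pod is ready, starting port-forward")
	case <-time.After(25 * time.Second):
		// Give up if the collector pod never reaches Running.
		fmt.Println("error: pod was not ready in time")
	}
}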