mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-09-25 04:17:25 +00:00)
Merge pull request #15 from up9inc/feature/workin_first_and_ugly
First version working and very ugly
@@ -14,3 +14,4 @@ LICENSE
 .git/
 .github/
 build/
+**/node_modules/
Dockerfile
@@ -1,3 +1,12 @@
+FROM node:14-slim AS site-build
+
+WORKDIR /ui-build
+
+COPY ui .
+RUN npm i
+RUN npm run build
+
+
 FROM golang:1.16-alpine AS builder
 # Set necessary environment variables needed for our image.
 ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64
@@ -14,7 +23,7 @@ COPY tap/src ./
 RUN go build -ldflags="-s -w" -o passivetapper .

 # Move to api working directory (/api-build).
-WORKDIR ../api-build
+WORKDIR /api-build

 COPY api/go.mod api/go.sum ./
 RUN go mod download
@@ -22,17 +31,29 @@ RUN go mod download
 COPY api .
 RUN go build -ldflags="-s -w" -o apiserver .


+FROM alpine:3.13.5
+RUN apk add parallel libpcap-dev tcpdump
+
+# Copy binary and config files from /build to root folder of scratch container.
+COPY --from=builder ["/api-build/apiserver", "/"]
+COPY --from=builder ["/tap-build/passivetapper", "/"]
+COPY --from=site-build ["/ui-build/build", "/site"]
+
+
 FROM alpine:3.13.5

-RUN apk add bash libpcap-dev
+RUN apk add bash libpcap-dev tcpdump

 WORKDIR /app

 # Copy binary and config files from /build to root folder of scratch container.
 COPY --from=builder ["/api-build/apiserver", "."]
 COPY --from=builder ["/tap-build/passivetapper", "."]
+COPY --from=site-build ["/ui-build/build", "site"]


 COPY api/scripts/multi-runner.sh ./

 # this script runs both apiserver and passivetapper and exits either if one of them exits, preventing a scenario where the container runs without one process
 CMD "./multi-runner.sh"
@@ -1,6 +1,7 @@
 package main

 import (
+    "flag"
     "github.com/gofiber/fiber/v2"
     "mizuserver/pkg/inserter"
     "mizuserver/pkg/middleware"
@@ -14,10 +15,13 @@ func main() {

     app := fiber.New()

-    go inserter.StartReadingFiles("/tmp/up9hars") // process to read files and insert to DB
+    var harDir = flag.String("hardir", "input", "Directory in which we read har files from")
+    flag.Parse()
+
+    go inserter.StartReadingFiles(*harDir) // process to read files and insert to DB

     middleware.FiberMiddleware(app) // Register Fiber's middleware for app.

     app.Static("/", "./site")

     //Simple route to know server is running
@@ -41,6 +41,7 @@ func StartReadingFiles(workingDir string) {
         utils.CheckErr(decErr)

         for _, entry := range inputHar.Log.Entries {
+            time.Sleep(time.Millisecond * 250)
             SaveHarToDb(*entry, "")
         }
         rmErr := os.Remove(inputFilePath)
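For context, the hunk above sits inside a loop that drains HAR files from the watched directory: each file is decoded, its entries are saved, and the file is removed, now with a 250ms pause between entries. Below is a minimal Go sketch of that pattern; the HarFile/HarEntry types, the saveEntry callback, and the polling structure are hypothetical stand-ins, not the repository's actual inserter code, and only the throttle mirrors the change shown above.

package main

import (
    "encoding/json"
    "os"
    "path/filepath"
    "time"
)

// Hypothetical, minimal HAR shape; the real inserter package defines its own model.
type HarEntry struct {
    StartedDateTime string `json:"startedDateTime"`
}

type HarFile struct {
    Log struct {
        Entries []HarEntry `json:"entries"`
    } `json:"log"`
}

// startReadingFiles polls workingDir for *.har files, stores their entries,
// then deletes each processed file - roughly the loop the diff above modifies.
func startReadingFiles(workingDir string, saveEntry func(HarEntry)) {
    for {
        paths, _ := filepath.Glob(filepath.Join(workingDir, "*.har"))
        for _, inputFilePath := range paths {
            data, err := os.ReadFile(inputFilePath)
            if err != nil {
                continue
            }
            var inputHar HarFile
            if decErr := json.Unmarshal(data, &inputHar); decErr != nil {
                continue
            }
            for _, entry := range inputHar.Log.Entries {
                time.Sleep(time.Millisecond * 250) // throttle added in this PR
                saveEntry(entry)
            }
            _ = os.Remove(inputFilePath)
        }
        time.Sleep(time.Second)
    }
}

func main() {
    startReadingFiles("/tmp/mizuhars", func(e HarEntry) {
        _ = e // placeholder for the DB insert done by SaveHarToDb in the real code
    })
}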
@@ -1,7 +1,7 @@
 #!/bin/bash

 # this script runs both executables and exits everything if one fails
-./apiserver &
-./passivetapper -i eth0 &
+./apiserver -hardir /tmp/mizuhars &
+./passivetapper -i any -hardump -hardir /tmp/mizuhars -harentriesperfile 50 &
 wait -n
 pkill -P $$
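The script's mechanism is `wait -n` (return as soon as any background job exits) followed by `pkill -P $$` (kill the remaining children of the script), so the container stops if either process dies. As a rough Go illustration of the same supervision pattern, not code from this repository (binary names and flags copied from the script above):

package main

import (
    "log"
    "os"
    "os/exec"
)

// Start both processes, block until the first one exits for any reason,
// then stop the other so we never keep running with only one of them alive.
func main() {
    apiserver := exec.Command("./apiserver", "-hardir", "/tmp/mizuhars")
    tapper := exec.Command("./passivetapper", "-i", "any", "-hardump", "-hardir", "/tmp/mizuhars", "-harentriesperfile", "50")

    done := make(chan error, 2)
    for _, cmd := range []*exec.Cmd{apiserver, tapper} {
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Start(); err != nil {
            log.Fatal(err)
        }
        go func(c *exec.Cmd) { done <- c.Wait() }(cmd)
    }

    <-done // first process to exit
    _ = apiserver.Process.Kill()
    _ = tapper.Process.Kill()
    os.Exit(1)
}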
@@ -7,7 +7,7 @@
 | flag               | default | purpose                                                   |
 |---------------------|---------|-----------------------------------------------------------|
 | `--no-dashboard`    | `false` | Don't host the dashboard (not applicable at the moment)   |
-| `--dashboard-port`  | `3000`  | local port that dashboard will be forwarded to            |
+| `--dashboard-port`  | `8899`  | local port that dashboard will be forwarded to            |
 | `--namespace`       |         | use namespace different than the one found in kubeconfig  |
 | `--kubeconfig`      |         | Path to custom kubeconfig file                            |
@@ -32,11 +32,11 @@ func init() {
     rootCmd.Flags().BoolVarP(&config.Configuration.DisplayVersion, "version", "v", false, "Print the version and exit")
     rootCmd.Flags().BoolVarP(&config.Configuration.Quiet, "quiet", "q", false, "No stdout output")
     rootCmd.Flags().BoolVarP(&config.Configuration.NoDashboard, "no-dashboard", "", false, "Dont host a dashboard")
-    rootCmd.Flags().Uint16VarP(&config.Configuration.DashboardPort, "dashboard-port", "p", 3000, "Provide a custom port for the dashboard webserver")
+    rootCmd.Flags().Uint16VarP(&config.Configuration.DashboardPort, "dashboard-port", "p", 8899, "Provide a custom port for the dashboard webserver")
     rootCmd.Flags().StringVarP(&config.Configuration.Namespace, "namespace", "n", "", "Namespace selector")
     rootCmd.Flags().BoolVarP(&config.Configuration.AllNamespaces, "all-namespaces", "A", false, "Select all namespaces")
     rootCmd.Flags().StringVarP(&config.Configuration.KubeConfigPath, "kubeconfig", "k", "", "Path to kubeconfig file")
-    rootCmd.Flags().StringVarP(&config.Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v1", "Custom image for mizu collector")
+    rootCmd.Flags().StringVarP(&config.Configuration.MizuImage, "mizu-image", "", "gcr.io/up9-docker-hub/mizu/develop/v6", "Custom image for mizu collector")
     rootCmd.Flags().Uint16VarP(&config.Configuration.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
 }
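These registrations bind into a shared config.Configuration object whose definition is not part of this diff. Based solely on the field names and flag types referenced above, it presumably looks roughly like the following sketch (field names come from the diff; everything else is assumed):

package config

// configuration is not shown in this diff; this sketch only mirrors the
// fields and flag types referenced by the rootCmd.Flags() calls above.
type configuration struct {
    DisplayVersion bool   // --version / -v
    Quiet          bool   // --quiet / -q
    NoDashboard    bool   // --no-dashboard
    DashboardPort  uint16 // --dashboard-port / -p, now defaulting to 8899
    Namespace      string // --namespace / -n
    AllNamespaces  bool   // --all-namespaces / -A
    KubeConfigPath string // --kubeconfig / -k
    MizuImage      string // --mizu-image, now defaulting to .../develop/v6
    MizuPodPort    uint16 // --mizu-port
}

// Configuration is the package-level instance the CLI flags write into.
var Configuration = configuration{}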
@@ -9,10 +9,10 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/kubernetes"
-    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
     _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
-    _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
+    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+    _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
+    _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd"
     _ "k8s.io/client-go/tools/portforward"
@@ -71,17 +71,28 @@ func (provider *Provider) GetPods(ctx context.Context) {
 }

 func (provider *Provider) CreatePod(ctx context.Context, podName string, podImage string) (*core.Pod, error) {
+    privileged := true
     pod := &core.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name:      podName,
             Namespace: provider.Namespace,
         },
         Spec: core.PodSpec{
+            HostNetwork: true, // very important to make passive tapper see traffic
             Containers: []core.Container{
                 {
                     Name:            podName,
                     Image:           podImage,
+                    ImagePullPolicy: core.PullAlways,
+                    SecurityContext: &core.SecurityContext{
+                        Privileged: &privileged, // must be privileged to get node level traffic
+                    },
+                    Env: []core.EnvVar{
+                        {
+                            Name:  "HOST_MODE",
+                            Value: "1",
+                        },
+                    },
                 },
             },
             TerminationGracePeriodSeconds: new(int64),
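The hunk above is cut off before the pod is actually submitted to the cluster. For orientation, with client-go a spec like this is normally created along the following lines; this is a sketch rather than code from the repository, and it assumes a *kubernetes.Clientset is available, which the diff does not show for the Provider:

package example

import (
    "context"

    core "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// createPod submits a pod spec like the one built above. The clientset is
// passed in explicitly here because the diff does not show how the Provider
// stores its Kubernetes client.
func createPod(ctx context.Context, clientSet *kubernetes.Clientset, namespace string, pod *core.Pod) (*core.Pod, error) {
    return clientSet.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
}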
@@ -84,6 +84,8 @@ func (factory *tcpStreamFactory) WaitGoRoutines() {
 }

 func (factory *tcpStreamFactory) shouldTap(dstIP string, dstPort int) bool {
+    return true // TODO: this is only for checking it now
+
     if hostMode {
         return inArrayString(hostAppAddresses, fmt.Sprintf("%s:%d", dstIP, dstPort))
     } else {