extracted create and clean resources from tap runner (#557)

commit fd97a09624 (parent 52ce6044ea)
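This change moves Kubernetes resource creation and cleanup out of the tap runner in cli/cmd into a new cli/resources package, and moves the signal-wait helper into a new cli/utils package. Instead of reading config.Config internally, the extracted functions receive every configuration value they need as explicit parameters, so the cmd layer now unpacks the config at the call site. A minimal sketch of the resulting cleanup call shape, based on the signatures in the diff below (the wrapper function itself is illustrative and not part of the commit; it assumes the context, time, config, resources and kubernetes packages are imported):

// Illustrative only: cmd-side code now passes config values into the resources package explicitly.
func cleanupSketch(kubernetesProvider *kubernetes.Provider) {
    removalCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()
    resources.CleanUpMizuResources(removalCtx, cancel, kubernetesProvider,
        config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)
}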
@@ -1,6 +1,9 @@
 package cmd

-import "github.com/up9inc/mizu/cli/apiserver"
+import (
+    "github.com/up9inc/mizu/cli/apiserver"
+    "github.com/up9inc/mizu/cli/config"
+)

 func performCleanCommand() {
     kubernetesProvider, err := getKubernetesProviderForCli()
@@ -8,5 +11,5 @@ func performCleanCommand() {
         return
     }

-    finishMizuExecution(kubernetesProvider, apiserver.NewProvider(GetApiServerUrl(), apiserver.DefaultRetries, apiserver.DefaultTimeout))
+    finishMizuExecution(kubernetesProvider, apiserver.NewProvider(GetApiServerUrl(), apiserver.DefaultRetries, apiserver.DefaultTimeout), config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)
 }
@@ -7,11 +7,9 @@ import (
     "github.com/up9inc/mizu/cli/apiserver"
     "github.com/up9inc/mizu/cli/mizu"
     "github.com/up9inc/mizu/cli/mizu/fsUtils"
+    "github.com/up9inc/mizu/cli/resources"
     "github.com/up9inc/mizu/cli/telemetry"
-    "os"
-    "os/signal"
     "path"
-    "syscall"
     "time"

     "github.com/up9inc/mizu/cli/config"
@@ -37,22 +35,6 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, cancel
     logger.Log.Debugf("proxy ended")
 }

-func waitForFinish(ctx context.Context, cancel context.CancelFunc) {
-    logger.Log.Debugf("waiting for finish...")
-    sigChan := make(chan os.Signal, 1)
-    signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
-
-    // block until ctx cancel is called or termination signal is received
-    select {
-    case <-ctx.Done():
-        logger.Log.Debugf("ctx done")
-        break
-    case <-sigChan:
-        logger.Log.Debugf("Got termination signal, canceling execution...")
-        cancel()
-    }
-}
-
 func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
     kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath())
     if err != nil {
@@ -71,12 +53,12 @@ func handleKubernetesProviderError(err error) {
     }
 }

-func finishMizuExecution(kubernetesProvider *kubernetes.Provider, apiProvider *apiserver.Provider) {
+func finishMizuExecution(kubernetesProvider *kubernetes.Provider, apiProvider *apiserver.Provider, isNsRestrictedMode bool, mizuResourcesNamespace string) {
     telemetry.ReportAPICalls(apiProvider)
     removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
     defer cancel()
     dumpLogsIfNeeded(removalCtx, kubernetesProvider)
-    cleanUpMizuResources(removalCtx, cancel, kubernetesProvider)
+    resources.CleanUpMizuResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, mizuResourcesNamespace)
 }

 func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
@@ -89,23 +71,3 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid
         logger.Log.Errorf("Failed dump logs %v", err)
     }
 }
-
-func cleanUpMizuResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider) {
-    logger.Log.Infof("\nRemoving mizu resources")
-
-    var leftoverResources []string
-
-    if config.Config.IsNsRestrictedMode() {
-        leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider)
-    } else {
-        leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider)
-    }
-
-    if len(leftoverResources) > 0 {
-        errMsg := fmt.Sprintf("Failed to remove the following resources, for more info check logs at %s:", fsUtils.GetLogFilePath())
-        for _, resource := range leftoverResources {
-            errMsg += "\n- " + resource
-        }
-        logger.Log.Errorf(uiUtils.Error, errMsg)
-    }
-}
@@ -4,26 +4,24 @@ import (
     "context"
     "errors"
     "fmt"
+    "github.com/up9inc/mizu/cli/resources"
+    "github.com/up9inc/mizu/cli/utils"
     "io/ioutil"
     "regexp"
     "strings"
     "time"
-
-    "k8s.io/apimachinery/pkg/util/intstr"

     "github.com/getkin/kin-openapi/openapi3"
     "gopkg.in/yaml.v3"
     core "k8s.io/api/core/v1"
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/util/wait"

     "github.com/up9inc/mizu/cli/apiserver"
     "github.com/up9inc/mizu/cli/cmd/goUtils"
     "github.com/up9inc/mizu/cli/config"
     "github.com/up9inc/mizu/cli/config/configStructs"
     "github.com/up9inc/mizu/cli/errormessage"
     "github.com/up9inc/mizu/cli/mizu"
-    "github.com/up9inc/mizu/cli/mizu/fsUtils"
     "github.com/up9inc/mizu/cli/uiUtils"
     "github.com/up9inc/mizu/shared"
@@ -37,9 +35,6 @@ const cleanupTimeout = time.Minute
 type tapState struct {
     startTime                time.Time
     targetNamespaces         []string
-
-    apiServerService         *core.Service
     tapperSyncer             *kubernetes.MizuTapperSyncer
     mizuServiceAccountExists bool
 }

@@ -131,7 +126,7 @@ func RunMizuTap() {
     }

     logger.Log.Infof("Waiting for Mizu Agent to start...")
-    if err := createMizuResources(ctx, cancel, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig); err != nil {
+    if state.mizuServiceAccountExists, err = resources.CreateMizuResources(ctx, cancel, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace, config.Config.Tap.DaemonMode, config.Config.AgentImage, getSyncEntriesConfig(), config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.NoPersistentVolumeClaim); err != nil {
         logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))

         var statusError *k8serrors.StatusError
@@ -143,24 +138,24 @@ func RunMizuTap() {
         return
     }
     if config.Config.Tap.DaemonMode {
-        if err := handleDaemonModePostCreation(ctx, cancel, kubernetesProvider, state.targetNamespaces); err != nil {
-            defer finishMizuExecution(kubernetesProvider, apiProvider)
+        if err := handleDaemonModePostCreation(cancel, kubernetesProvider); err != nil {
+            defer finishMizuExecution(kubernetesProvider, apiProvider, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)
             cancel()
         } else {
             logger.Log.Infof(uiUtils.Magenta, "Mizu is now running in daemon mode, run `mizu view` to connect to the mizu daemon instance")
         }
     } else {
-        defer finishMizuExecution(kubernetesProvider, apiProvider)
+        defer finishMizuExecution(kubernetesProvider, apiProvider, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)

         go goUtils.HandleExcWrapper(watchApiServerEvents, ctx, kubernetesProvider, cancel)
         go goUtils.HandleExcWrapper(watchApiServerPod, ctx, kubernetesProvider, cancel)

         // block until exit signal or error
-        waitForFinish(ctx, cancel)
+        utils.WaitForFinish(ctx, cancel)
     }
 }

-func handleDaemonModePostCreation(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
+func handleDaemonModePostCreation(cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider) error {
     apiProvider := apiserver.NewProvider(GetApiServerUrl(), 90, 1*time.Second)

     if err := waitForDaemonModeToBeReady(cancel, kubernetesProvider, apiProvider); err != nil {
@@ -252,8 +247,6 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
         }
     }()

     state.tapperSyncer = tapperSyncer

     return nil
 }

@@ -287,137 +280,6 @@ func readValidationRules(file string) (string, error) {
     return string(newContent), nil
 }

-func createMizuResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string) error {
-    if !config.Config.IsNsRestrictedMode() {
-        if err := createMizuNamespace(ctx, kubernetesProvider); err != nil {
-            return err
-        }
-    }
-
-    if err := createMizuConfigmap(ctx, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig); err != nil {
-        logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Failed to create resources required for policy validation. Mizu will not validate policy rules. error: %v", errormessage.FormatError(err)))
-    }
-
-    var err error
-    state.mizuServiceAccountExists, err = createRBACIfNecessary(ctx, kubernetesProvider)
-    if err != nil {
-        if !config.Config.Tap.DaemonMode {
-            logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Failed to ensure the resources required for IP resolving. Mizu will not resolve target IPs to names. error: %v", errormessage.FormatError(err)))
-        }
-    }
-
-    var serviceAccountName string
-    if state.mizuServiceAccountExists {
-        serviceAccountName = kubernetes.ServiceAccountName
-    } else {
-        serviceAccountName = ""
-    }
-
-    opts := &kubernetes.ApiServerOptions{
-        Namespace:             config.Config.MizuResourcesNamespace,
-        PodName:               kubernetes.ApiServerPodName,
-        PodImage:              config.Config.AgentImage,
-        ServiceAccountName:    serviceAccountName,
-        IsNamespaceRestricted: config.Config.IsNsRestrictedMode(),
-        SyncEntriesConfig:     getSyncEntriesConfig(),
-        MaxEntriesDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(),
-        Resources:             config.Config.Tap.ApiServerResources,
-        ImagePullPolicy:       config.Config.ImagePullPolicy(),
-        LogLevel:              config.Config.LogLevel(),
-    }
-
-    if config.Config.Tap.DaemonMode {
-        if !state.mizuServiceAccountExists {
-            defer cleanUpMizuResources(ctx, cancel, kubernetesProvider)
-            logger.Log.Fatalf(uiUtils.Red, fmt.Sprintf("Failed to ensure the resources required for mizu to run in daemon mode. cannot proceed. error: %v", errormessage.FormatError(err)))
-        }
-        if err := createMizuApiServerDeployment(ctx, kubernetesProvider, opts); err != nil {
-            return err
-        }
-    } else {
-        if err := createMizuApiServerPod(ctx, kubernetesProvider, opts); err != nil {
-            return err
-        }
-    }
-
-    state.apiServerService, err = kubernetesProvider.CreateService(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName, kubernetes.ApiServerPodName)
-    if err != nil {
-        return err
-    }
-    logger.Log.Debugf("Successfully created service: %s", kubernetes.ApiServerPodName)
-
-    return nil
-}
-
-func createMizuConfigmap(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string) error {
-    err := kubernetesProvider.CreateConfigMap(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName, serializedValidationRules, serializedContract, serializedMizuConfig)
-    return err
-}
-
-func createMizuNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider) error {
-    _, err := kubernetesProvider.CreateNamespace(ctx, config.Config.MizuResourcesNamespace)
-    return err
-}
-
-func createMizuApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) error {
-    pod, err := kubernetesProvider.GetMizuApiServerPodObject(opts, false, "")
-    if err != nil {
-        return err
-    }
-    if _, err = kubernetesProvider.CreatePod(ctx, config.Config.MizuResourcesNamespace, pod); err != nil {
-        return err
-    }
-    logger.Log.Debugf("Successfully created API server pod: %s", kubernetes.ApiServerPodName)
-    return nil
-}
-
-func createMizuApiServerDeployment(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) error {
-    volumeClaimCreated := false
-    if !config.Config.Tap.NoPersistentVolumeClaim {
-        volumeClaimCreated = TryToCreatePersistentVolumeClaim(ctx, kubernetesProvider)
-    }
-
-    pod, err := kubernetesProvider.GetMizuApiServerPodObject(opts, volumeClaimCreated, kubernetes.PersistentVolumeClaimName)
-    if err != nil {
-        return err
-    }
-    pod.Spec.Containers[0].LivenessProbe = &core.Probe{
-        Handler: core.Handler{
-            HTTPGet: &core.HTTPGetAction{
-                Path: "/echo",
-                Port: intstr.FromInt(shared.DefaultApiServerPort),
-            },
-        },
-        InitialDelaySeconds: 1,
-        PeriodSeconds:       10,
-    }
-    if _, err = kubernetesProvider.CreateDeployment(ctx, config.Config.MizuResourcesNamespace, opts.PodName, pod); err != nil {
-        return err
-    }
-    logger.Log.Debugf("Successfully created API server deployment: %s", kubernetes.ApiServerPodName)
-    return nil
-}
-
-func TryToCreatePersistentVolumeClaim(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-    isDefaultStorageClassAvailable, err := kubernetesProvider.IsDefaultStorageProviderAvailable(ctx)
-    if err != nil {
-        logger.Log.Warningf(uiUtils.Yellow, "An error occured when checking if a default storage provider exists in this cluster, this means mizu data will be lost on mizu-api-server pod restart")
-        logger.Log.Debugf("error checking if default storage class exists: %v", err)
-        return false
-    } else if !isDefaultStorageClassAvailable {
-        logger.Log.Warningf(uiUtils.Yellow, "Could not find default storage provider in this cluster, this means mizu data will be lost on mizu-api-server pod restart")
-        return false
-    }
-
-    if _, err = kubernetesProvider.CreatePersistentVolumeClaim(ctx, config.Config.MizuResourcesNamespace, kubernetes.PersistentVolumeClaimName, config.Config.Tap.MaxEntriesDBSizeBytes()+mizu.DaemonModePersistentVolumeSizeBufferBytes); err != nil {
-        logger.Log.Warningf(uiUtils.Yellow, "An error has occured while creating a persistent volume claim for mizu, this means mizu data will be lost on mizu-api-server pod restart")
-        logger.Log.Debugf("error creating persistent volume claim: %v", err)
-        return false
-    }
-
-    return true
-}
-
 func getMizuApiFilteringOptions() (*api.TrafficFilteringOptions, error) {
     var compiledRegexSlice []*api.SerializableRegexp

@@ -452,114 +314,6 @@ func getSyncEntriesConfig() *shared.SyncEntriesConfig {
     }
 }

-func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider) []string {
-    leftoverResources := make([]string, 0)
-
-    if err := kubernetesProvider.RemoveService(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
-        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.ApiServerPodName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveDaemonSet(ctx, config.Config.MizuResourcesNamespace, kubernetes.TapperDaemonSetName); err != nil {
-        resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.TapperDaemonSetName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveConfigMap(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName); err != nil {
-        resourceDesc := fmt.Sprintf("ConfigMap %s in namespace %s", kubernetes.ConfigMapName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveServicAccount(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName); err != nil {
-        resourceDesc := fmt.Sprintf("Service Account %s in namespace %s", kubernetes.ServiceAccountName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveRole(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName); err != nil {
-        resourceDesc := fmt.Sprintf("Role %s in namespace %s", kubernetes.RoleName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemovePod(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
-        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.ApiServerPodName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    //daemon mode resources
-    if err := kubernetesProvider.RemoveRoleBinding(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName); err != nil {
-        resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", kubernetes.RoleBindingName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveDeployment(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
-        resourceDesc := fmt.Sprintf("Deployment %s in namespace %s", kubernetes.ApiServerPodName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, config.Config.MizuResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
-        resourceDesc := fmt.Sprintf("PersistentVolumeClaim %s in namespace %s", kubernetes.PersistentVolumeClaimName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveRole(ctx, config.Config.MizuResourcesNamespace, kubernetes.DaemonRoleName); err != nil {
-        resourceDesc := fmt.Sprintf("Role %s in namespace %s", kubernetes.DaemonRoleName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveRoleBinding(ctx, config.Config.MizuResourcesNamespace, kubernetes.DaemonRoleBindingName); err != nil {
-        resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", kubernetes.DaemonRoleBindingName, config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    return leftoverResources
-}
-
-func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider) []string {
-    leftoverResources := make([]string, 0)
-
-    if err := kubernetesProvider.RemoveNamespace(ctx, config.Config.MizuResourcesNamespace); err != nil {
-        resourceDesc := fmt.Sprintf("Namespace %s", config.Config.MizuResourcesNamespace)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    } else {
-        defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider)
-    }
-
-    if err := kubernetesProvider.RemoveClusterRole(ctx, kubernetes.ClusterRoleName); err != nil {
-        resourceDesc := fmt.Sprintf("ClusterRole %s", kubernetes.ClusterRoleName)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, kubernetes.ClusterRoleBindingName); err != nil {
-        resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", kubernetes.ClusterRoleBindingName)
-        handleDeletionError(err, resourceDesc, &leftoverResources)
-    }
-
-    return leftoverResources
-}
-
-func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
-    logger.Log.Debugf("Error removing %s: %v", resourceDesc, errormessage.FormatError(err))
-    *leftoverResources = append(*leftoverResources, resourceDesc)
-}
-
-func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider) {
-    // Call cancel if a terminating signal was received. Allows user to skip the wait.
-    go func() {
-        waitForFinish(ctx, cancel)
-    }()
-
-    if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, config.Config.MizuResourcesNamespace); err != nil {
-        switch {
-        case ctx.Err() == context.Canceled:
-            logger.Log.Debugf("Do nothing. User interrupted the wait")
-        case err == wait.ErrWaitTimeout:
-            logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Timeout while removing Namespace %s", config.Config.MizuResourcesNamespace))
-        default:
-            logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error while waiting for Namespace %s to be deleted: %v", config.Config.MizuResourcesNamespace, errormessage.FormatError(err)))
-        }
-    }
-}
-
 func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
     podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.ApiServerPodName))
     podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
@@ -705,21 +459,3 @@ func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
         return []string{currentNamespace}
     }
 }
-
-func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider) (bool, error) {
-    if !config.Config.IsNsRestrictedMode() {
-        if err := kubernetesProvider.CreateMizuRBAC(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, mizu.RBACVersion); err != nil {
-            return false, err
-        }
-    } else {
-        if err := kubernetesProvider.CreateMizuRBACNamespaceRestricted(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, mizu.RBACVersion); err != nil {
-            return false, err
-        }
-    }
-    if config.Config.Tap.DaemonMode {
-        if err := kubernetesProvider.CreateDaemonsetRBAC(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.DaemonRoleName, kubernetes.DaemonRoleBindingName, mizu.RBACVersion); err != nil {
-            return false, err
-        }
-    }
-    return true, nil
-}
@@ -3,6 +3,7 @@ package cmd
 import (
     "context"
     "fmt"
+    "github.com/up9inc/mizu/cli/utils"
     "net/http"

     "github.com/up9inc/mizu/cli/apiserver"
@@ -71,5 +72,5 @@ func runMizuView() {
         return
     }

-    waitForFinish(ctx, cancel)
+    utils.WaitForFinish(ctx, cancel)
 }
cli/resources/cleanResources.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package resources

import (
    "context"
    "fmt"
    "github.com/up9inc/mizu/cli/errormessage"
    "github.com/up9inc/mizu/cli/mizu/fsUtils"
    "github.com/up9inc/mizu/cli/uiUtils"
    "github.com/up9inc/mizu/cli/utils"
    "github.com/up9inc/mizu/shared/kubernetes"
    "github.com/up9inc/mizu/shared/logger"
    "k8s.io/apimachinery/pkg/util/wait"
)

func CleanUpMizuResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, mizuResourcesNamespace string) {
    logger.Log.Infof("\nRemoving mizu resources")

    var leftoverResources []string

    if isNsRestrictedMode {
        leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, mizuResourcesNamespace)
    } else {
        leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, mizuResourcesNamespace)
    }

    if len(leftoverResources) > 0 {
        errMsg := fmt.Sprintf("Failed to remove the following resources, for more info check logs at %s:", fsUtils.GetLogFilePath())
        for _, resource := range leftoverResources {
            errMsg += "\n- " + resource
        }
        logger.Log.Errorf(uiUtils.Error, errMsg)
    }
}

func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, mizuResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveNamespace(ctx, mizuResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("Namespace %s", mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, mizuResourcesNamespace)
    }

    if err := kubernetesProvider.RemoveClusterRole(ctx, kubernetes.ClusterRoleName); err != nil {
        resourceDesc := fmt.Sprintf("ClusterRole %s", kubernetes.ClusterRoleName)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, kubernetes.ClusterRoleBindingName); err != nil {
        resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", kubernetes.ClusterRoleBindingName)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    return leftoverResources
}

func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, mizuResourcesNamespace string) {
    // Call cancel if a terminating signal was received. Allows user to skip the wait.
    go func() {
        utils.WaitForFinish(ctx, cancel)
    }()

    if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, mizuResourcesNamespace); err != nil {
        switch {
        case ctx.Err() == context.Canceled:
            logger.Log.Debugf("Do nothing. User interrupted the wait")
        case err == wait.ErrWaitTimeout:
            logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Timeout while removing Namespace %s", mizuResourcesNamespace))
        default:
            logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error while waiting for Namespace %s to be deleted: %v", mizuResourcesNamespace, errormessage.FormatError(err)))
        }
    }
}

func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, mizuResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveService(ctx, mizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.ApiServerPodName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveDaemonSet(ctx, mizuResourcesNamespace, kubernetes.TapperDaemonSetName); err != nil {
        resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.TapperDaemonSetName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveConfigMap(ctx, mizuResourcesNamespace, kubernetes.ConfigMapName); err != nil {
        resourceDesc := fmt.Sprintf("ConfigMap %s in namespace %s", kubernetes.ConfigMapName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveServicAccount(ctx, mizuResourcesNamespace, kubernetes.ServiceAccountName); err != nil {
        resourceDesc := fmt.Sprintf("Service Account %s in namespace %s", kubernetes.ServiceAccountName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveRole(ctx, mizuResourcesNamespace, kubernetes.RoleName); err != nil {
        resourceDesc := fmt.Sprintf("Role %s in namespace %s", kubernetes.RoleName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePod(ctx, mizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.ApiServerPodName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    //install mode resources
    if err := kubernetesProvider.RemoveRoleBinding(ctx, mizuResourcesNamespace, kubernetes.RoleBindingName); err != nil {
        resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", kubernetes.RoleBindingName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveDeployment(ctx, mizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
        resourceDesc := fmt.Sprintf("Deployment %s in namespace %s", kubernetes.ApiServerPodName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, mizuResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
        resourceDesc := fmt.Sprintf("PersistentVolumeClaim %s in namespace %s", kubernetes.PersistentVolumeClaimName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveRole(ctx, mizuResourcesNamespace, kubernetes.DaemonRoleName); err != nil {
        resourceDesc := fmt.Sprintf("Role %s in namespace %s", kubernetes.DaemonRoleName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveRoleBinding(ctx, mizuResourcesNamespace, kubernetes.DaemonRoleBindingName); err != nil {
        resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", kubernetes.DaemonRoleBindingName, mizuResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    return leftoverResources
}

func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
    logger.Log.Debugf("Error removing %s: %v", resourceDesc, errormessage.FormatError(err))
    *leftoverResources = append(*leftoverResources, resourceDesc)
}
cli/resources/createResources.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package resources

import (
    "context"
    "fmt"
    "github.com/op/go-logging"
    "github.com/up9inc/mizu/cli/errormessage"
    "github.com/up9inc/mizu/cli/mizu"
    "github.com/up9inc/mizu/cli/uiUtils"
    "github.com/up9inc/mizu/shared"
    "github.com/up9inc/mizu/shared/kubernetes"
    "github.com/up9inc/mizu/shared/logger"
    core "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

func CreateMizuResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string, isNsRestrictedMode bool, mizuResourcesNamespace string, isInstallMode bool, agentImage string, syncEntriesConfig *shared.SyncEntriesConfig, maxEntriesDBSizeBytes int64, apiServerResources shared.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, noPersistentVolumeClaim bool) (bool, error) {
    if !isNsRestrictedMode {
        if err := createMizuNamespace(ctx, kubernetesProvider, mizuResourcesNamespace); err != nil {
            return false, err
        }
    }

    if err := createMizuConfigmap(ctx, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig, mizuResourcesNamespace); err != nil {
        logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Failed to create resources required for policy validation. Mizu will not validate policy rules. error: %v", errormessage.FormatError(err)))
    }

    mizuServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, mizuResourcesNamespace, isInstallMode)
    if err != nil {
        if !isInstallMode {
            logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Failed to ensure the resources required for IP resolving. Mizu will not resolve target IPs to names. error: %v", errormessage.FormatError(err)))
        }
    }

    var serviceAccountName string
    if mizuServiceAccountExists {
        serviceAccountName = kubernetes.ServiceAccountName
    } else {
        serviceAccountName = ""
    }

    opts := &kubernetes.ApiServerOptions{
        Namespace:             mizuResourcesNamespace,
        PodName:               kubernetes.ApiServerPodName,
        PodImage:              agentImage,
        ServiceAccountName:    serviceAccountName,
        IsNamespaceRestricted: isNsRestrictedMode,
        SyncEntriesConfig:     syncEntriesConfig,
        MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
        Resources:             apiServerResources,
        ImagePullPolicy:       imagePullPolicy,
        LogLevel:              logLevel,
    }

    if isInstallMode {
        if !mizuServiceAccountExists {
            defer CleanUpMizuResources(ctx, cancel, kubernetesProvider, isNsRestrictedMode, mizuResourcesNamespace)
            logger.Log.Fatalf(uiUtils.Red, fmt.Sprintf("Failed to ensure the resources required for mizu to run in daemon mode. cannot proceed. error: %v", errormessage.FormatError(err)))
        }
        if err := createMizuApiServerDeployment(ctx, kubernetesProvider, opts, noPersistentVolumeClaim); err != nil {
            return mizuServiceAccountExists, err
        }
    } else {
        if err := createMizuApiServerPod(ctx, kubernetesProvider, opts); err != nil {
            return mizuServiceAccountExists, err
        }
    }

    _, err = kubernetesProvider.CreateService(ctx, mizuResourcesNamespace, kubernetes.ApiServerPodName, kubernetes.ApiServerPodName)
    if err != nil {
        return mizuServiceAccountExists, err
    }
    logger.Log.Debugf("Successfully created service: %s", kubernetes.ApiServerPodName)

    return mizuServiceAccountExists, nil
}

func createMizuNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, mizuResourcesNamespace string) error {
    _, err := kubernetesProvider.CreateNamespace(ctx, mizuResourcesNamespace)
    return err
}

func createMizuConfigmap(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string, mizuResourcesNamespace string) error {
    err := kubernetesProvider.CreateConfigMap(ctx, mizuResourcesNamespace, kubernetes.ConfigMapName, serializedValidationRules, serializedContract, serializedMizuConfig)
    return err
}

func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, mizuResourcesNamespace string, isInstallMode bool) (bool, error) {
    if !isNsRestrictedMode {
        if err := kubernetesProvider.CreateMizuRBAC(ctx, mizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, mizu.RBACVersion); err != nil {
            return false, err
        }
    } else {
        if err := kubernetesProvider.CreateMizuRBACNamespaceRestricted(ctx, mizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, mizu.RBACVersion); err != nil {
            return false, err
        }
    }
    if isInstallMode {
        if err := kubernetesProvider.CreateDaemonsetRBAC(ctx, mizuResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.DaemonRoleName, kubernetes.DaemonRoleBindingName, mizu.RBACVersion); err != nil {
            return false, err
        }
    }
    return true, nil
}

func createMizuApiServerDeployment(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions, noPersistentVolumeClaim bool) error {
    volumeClaimCreated := false
    if !noPersistentVolumeClaim {
        volumeClaimCreated = tryToCreatePersistentVolumeClaim(ctx, kubernetesProvider, opts)
    }

    pod, err := kubernetesProvider.GetMizuApiServerPodObject(opts, volumeClaimCreated, kubernetes.PersistentVolumeClaimName)
    if err != nil {
        return err
    }
    pod.Spec.Containers[0].LivenessProbe = &core.Probe{
        Handler: core.Handler{
            HTTPGet: &core.HTTPGetAction{
                Path: "/echo",
                Port: intstr.FromInt(shared.DefaultApiServerPort),
            },
        },
        InitialDelaySeconds: 1,
        PeriodSeconds:       10,
    }
    if _, err = kubernetesProvider.CreateDeployment(ctx, opts.Namespace, opts.PodName, pod); err != nil {
        return err
    }
    logger.Log.Debugf("Successfully created API server deployment: %s", kubernetes.ApiServerPodName)
    return nil
}

func tryToCreatePersistentVolumeClaim(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) bool {
    isDefaultStorageClassAvailable, err := kubernetesProvider.IsDefaultStorageProviderAvailable(ctx)
    if err != nil {
        logger.Log.Warningf(uiUtils.Yellow, "An error occured when checking if a default storage provider exists in this cluster, this means mizu data will be lost on mizu-api-server pod restart")
        logger.Log.Debugf("error checking if default storage class exists: %v", err)
        return false
    } else if !isDefaultStorageClassAvailable {
        logger.Log.Warningf(uiUtils.Yellow, "Could not find default storage provider in this cluster, this means mizu data will be lost on mizu-api-server pod restart")
        return false
    }

    if _, err = kubernetesProvider.CreatePersistentVolumeClaim(ctx, opts.Namespace, kubernetes.PersistentVolumeClaimName, opts.MaxEntriesDBSizeBytes + mizu.DaemonModePersistentVolumeSizeBufferBytes); err != nil {
        logger.Log.Warningf(uiUtils.Yellow, "An error has occured while creating a persistent volume claim for mizu, this means mizu data will be lost on mizu-api-server pod restart")
        logger.Log.Debugf("error creating persistent volume claim: %v", err)
        return false
    }

    return true
}

func createMizuApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) error {
    pod, err := kubernetesProvider.GetMizuApiServerPodObject(opts, false, "")
    if err != nil {
        return err
    }
    if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
        return err
    }
    logger.Log.Debugf("Successfully created API server pod: %s", kubernetes.ApiServerPodName)
    return nil
}
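Since CreateMizuResources now takes every formerly config-derived value as an explicit parameter, a call site has to unpack the configuration itself. A minimal sketch of such a call, modeled on the cmd-side invocation shown earlier in this diff; the wrapper function is illustrative and assumes it sits in the cmd package, where config and getSyncEntriesConfig are available:

// Illustrative wrapper: passes config-derived values explicitly, as the tap runner now does.
func createResourcesSketch(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider,
    serializedValidationRules, serializedContract, serializedMizuConfig string) (bool, error) {
    return resources.CreateMizuResources(ctx, cancel, provider,
        serializedValidationRules, serializedContract, serializedMizuConfig,
        config.Config.IsNsRestrictedMode(),        // isNsRestrictedMode
        config.Config.MizuResourcesNamespace,      // mizuResourcesNamespace
        config.Config.Tap.DaemonMode,              // isInstallMode
        config.Config.AgentImage,                  // agentImage
        getSyncEntriesConfig(),                    // syncEntriesConfig (*shared.SyncEntriesConfig)
        config.Config.Tap.MaxEntriesDBSizeBytes(), // maxEntriesDBSizeBytes
        config.Config.Tap.ApiServerResources,      // apiServerResources
        config.Config.ImagePullPolicy(),           // imagePullPolicy
        config.Config.LogLevel(),                  // logLevel
        config.Config.Tap.NoPersistentVolumeClaim) // noPersistentVolumeClaim
}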
cli/utils/waitUtils.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package utils

import (
    "context"
    "github.com/up9inc/mizu/shared/logger"
    "os"
    "os/signal"
    "syscall"
)

func WaitForFinish(ctx context.Context, cancel context.CancelFunc) {
    logger.Log.Debugf("waiting for finish...")
    sigChan := make(chan os.Signal, 1)
    signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

    // block until ctx cancel is called or termination signal is received
    select {
    case <-ctx.Done():
        logger.Log.Debugf("ctx done")
        break
    case <-sigChan:
        logger.Log.Debugf("Got termination signal, canceling execution...")
        cancel()
    }
}
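The extracted helper blocks until either the context is cancelled or a SIGINT/SIGTERM/SIGQUIT signal arrives, which is how the tap and view runners now park their main goroutine. A minimal standalone sketch of that pattern (illustrative, not taken from the repository):

package main

import (
    "context"
    "fmt"

    "github.com/up9inc/mizu/cli/utils"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Background work that can stop the program by calling cancel().
    go func() {
        // ... do work, then call cancel() when done
    }()

    // Blocks until cancel() is called or a termination signal is received.
    utils.WaitForFinish(ctx, cancel)
    fmt.Println("shutting down")
}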