From c64a19f1d495d7840f0f775772e537459693af41 Mon Sep 17 00:00:00 2001 From: mudler Date: Thu, 1 Dec 2022 17:44:19 +0100 Subject: [PATCH 1/8] :seedling: Refactor roles Signed-off-by: mudler Signed-off-by: Ettore Di Giacinto --- internal/provider/bootstrap.go | 6 ++++-- internal/role/auto.go | 1 + internal/role/common.go | 16 ++++++++++++++++ internal/role/{ => edgevpn}/master.go | 7 ++++--- internal/role/{ => edgevpn}/worker.go | 7 ++++--- internal/role/schedule.go | 14 +------------- 6 files changed, 30 insertions(+), 21 deletions(-) rename internal/role/{ => edgevpn}/master.go (94%) rename internal/role/{ => edgevpn}/worker.go (92%) diff --git a/internal/provider/bootstrap.go b/internal/provider/bootstrap.go index 96eed10..8c6ac3d 100644 --- a/internal/provider/bootstrap.go +++ b/internal/provider/bootstrap.go @@ -19,6 +19,8 @@ import ( sdk "github.com/kairos-io/kairos/sdk/bus" providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" "github.com/kairos-io/provider-kairos/internal/role" + edgevpn "github.com/kairos-io/provider-kairos/internal/role/edgevpn" + "github.com/kairos-io/provider-kairos/internal/services" "github.com/kairos-io/kairos/pkg/config" @@ -120,11 +122,11 @@ func Bootstrap(e *pluggable.Event) pluggable.EventResponse { service.WithRoles( service.RoleKey{ Role: "master", - RoleHandler: role.Master(c, providerConfig), + RoleHandler: edgevpn.Master(c, providerConfig), }, service.RoleKey{ Role: "worker", - RoleHandler: role.Worker(c, providerConfig), + RoleHandler: edgevpn.Worker(c, providerConfig), }, service.RoleKey{ Role: "auto", diff --git a/internal/role/auto.go b/internal/role/auto.go index 263177c..9ab4758 100644 --- a/internal/role/auto.go +++ b/internal/role/auto.go @@ -17,6 +17,7 @@ func contains(slice []string, elem string) bool { } return false } + func Auto(cc *config.Config, pconfig *providerConfig.Config) Role { return func(c *service.RoleConfig) error { advertizing, _ := c.Client.AdvertizingNodes() diff --git 
a/internal/role/common.go b/internal/role/common.go index 4207509..dc08b94 100644 --- a/internal/role/common.go +++ b/internal/role/common.go @@ -19,3 +19,19 @@ func SentinelExist() bool { func CreateSentinel() error { return ioutil.WriteFile("/usr/local/.kairos/deployed", []byte{}, os.ModePerm) } + +func getRoles(client *service.Client, nodes []string) ([]string, map[string]string, bool) { + unassignedNodes := []string{} + currentRoles := map[string]string{} + existsMaster := false + for _, a := range nodes { + role, _ := client.Get("role", a) + currentRoles[a] = role + if role == "master" { + existsMaster = true + } else if role == "" { + unassignedNodes = append(unassignedNodes, a) + } + } + return unassignedNodes, currentRoles, existsMaster +} diff --git a/internal/role/master.go b/internal/role/edgevpn/master.go similarity index 94% rename from internal/role/master.go rename to internal/role/edgevpn/master.go index d6c9b4b..c4c75fb 100644 --- a/internal/role/master.go +++ b/internal/role/edgevpn/master.go @@ -13,6 +13,7 @@ import ( "github.com/kairos-io/kairos/pkg/utils" providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" + "github.com/kairos-io/provider-kairos/internal/role" service "github.com/mudler/edgevpn/api/client/service" ) @@ -67,7 +68,7 @@ func propagateMasterData(ip string, c *service.RoleConfig) error { return nil } -func Master(cc *config.Config, pconfig *providerConfig.Config) Role { +func Master(cc *config.Config, pconfig *providerConfig.Config) role.Role { return func(c *service.RoleConfig) error { ip := utils.GetInterfaceIP("edgevpn0") @@ -83,7 +84,7 @@ func Master(cc *config.Config, pconfig *providerConfig.Config) Role { } } - if SentinelExist() { + if role.SentinelExist() { c.Logger.Info("Node already configured, backing off") return propagateMasterData(ip, c) } @@ -145,7 +146,7 @@ func Master(cc *config.Config, pconfig *providerConfig.Config) Role { return err } - return CreateSentinel() + return 
role.CreateSentinel() } } diff --git a/internal/role/worker.go b/internal/role/edgevpn/worker.go similarity index 92% rename from internal/role/worker.go rename to internal/role/edgevpn/worker.go index 3ea526a..3665eda 100644 --- a/internal/role/worker.go +++ b/internal/role/edgevpn/worker.go @@ -10,10 +10,11 @@ import ( "github.com/kairos-io/kairos/pkg/utils" providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" + "github.com/kairos-io/provider-kairos/internal/role" service "github.com/mudler/edgevpn/api/client/service" ) -func Worker(cc *config.Config, pconfig *providerConfig.Config) Role { +func Worker(cc *config.Config, pconfig *providerConfig.Config) role.Role { return func(c *service.RoleConfig) error { if pconfig.Kairos.Role != "" { @@ -24,7 +25,7 @@ func Worker(cc *config.Config, pconfig *providerConfig.Config) Role { } } - if SentinelExist() { + if role.SentinelExist() { c.Logger.Info("Node already configured, backing off") return nil } @@ -108,6 +109,6 @@ func Worker(cc *config.Config, pconfig *providerConfig.Config) Role { return err } - return CreateSentinel() + return role.CreateSentinel() } } diff --git a/internal/role/schedule.go b/internal/role/schedule.go index 38e30df..00d1aa2 100644 --- a/internal/role/schedule.go +++ b/internal/role/schedule.go @@ -16,19 +16,7 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco rand.Seed(time.Now().Unix()) // Assign roles to nodes - currentRoles := map[string]string{} - - existsMaster := false - unassignedNodes := []string{} - for _, a := range nodes { - role, _ := c.Client.Get("role", a) - currentRoles[a] = role - if role == "master" { - existsMaster = true - } else if role == "" { - unassignedNodes = append(unassignedNodes, a) - } - } + unassignedNodes, currentRoles, existsMaster := getRoles(c.Client, nodes) c.Logger.Infof("I'm the leader. 
My UUID is: %s.\n Current assigned roles: %+v", c.UUID, currentRoles) c.Logger.Infof("Master already present: %t", existsMaster) From 0517b1e76663f84ede806056ef61145100276918 Mon Sep 17 00:00:00 2001 From: mudler Date: Thu, 1 Dec 2022 18:14:05 +0100 Subject: [PATCH 2/8] :sparkles: Add kubeVIP support with p2p hybrid mode In this way, the p2p API will just run the co-ordination to setup KubeVIP automatically to the new cluster. Signed-off-by: Ettore Di Giacinto --- Earthfile | 2 +- internal/provider/bootstrap.go | 21 +++++-- internal/provider/config/config.go | 9 +++ internal/provider/{vpn.go => p2p.go} | 46 +++++++++++++++ internal/role/common.go | 9 +-- internal/role/p2p/common.go | 25 ++++++++ internal/role/p2p/kubevip.go | 74 ++++++++++++++++++++++++ internal/role/{edgevpn => p2p}/master.go | 22 +++++-- internal/role/{edgevpn => p2p}/worker.go | 0 internal/role/schedule.go | 72 ++++++++++++++++++++--- internal/services/edgevpn.go | 45 ++++++++++++++ 11 files changed, 301 insertions(+), 24 deletions(-) rename internal/provider/{vpn.go => p2p.go} (69%) create mode 100644 internal/role/p2p/common.go create mode 100644 internal/role/p2p/kubevip.go rename internal/role/{edgevpn => p2p}/master.go (87%) rename internal/role/{edgevpn => p2p}/worker.go (100%) diff --git a/Earthfile b/Earthfile index 23187a8..b1bcf96 100644 --- a/Earthfile +++ b/Earthfile @@ -124,7 +124,7 @@ docker: && INSTALL_K3S_SKIP_START="true" INSTALL_K3S_SKIP_ENABLE="true" INSTALL_K3S_SKIP_SELINUX_RPM="true" bash installer.sh \ && INSTALL_K3S_SKIP_START="true" INSTALL_K3S_SKIP_ENABLE="true" INSTALL_K3S_SKIP_SELINUX_RPM="true" bash installer.sh agent \ && rm -rf installer.sh - RUN luet install -y utils/edgevpn utils/k9s utils/nerdctl container/kubectl && luet cleanup + RUN luet install -y utils/edgevpn utils/k9s utils/nerdctl container/kubectl utils/kube-vip && luet cleanup # Drop env files from k3s as we will generate them IF [ -e "/etc/rancher/k3s/k3s.env" ] RUN rm -rf /etc/rancher/k3s/k3s.env 
/etc/rancher/k3s/k3s-agent.env && touch /etc/rancher/k3s/.keep diff --git a/internal/provider/bootstrap.go b/internal/provider/bootstrap.go index 8c6ac3d..0c5f09c 100644 --- a/internal/provider/bootstrap.go +++ b/internal/provider/bootstrap.go @@ -19,7 +19,7 @@ import ( sdk "github.com/kairos-io/kairos/sdk/bus" providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" "github.com/kairos-io/provider-kairos/internal/role" - edgevpn "github.com/kairos-io/provider-kairos/internal/role/edgevpn" + p2p "github.com/kairos-io/provider-kairos/internal/role/p2p" "github.com/kairos-io/provider-kairos/internal/services" @@ -97,9 +97,18 @@ func Bootstrap(e *pluggable.Event) pluggable.EventResponse { return ErrorEvent("No network token provided, exiting") } - logger.Info("Configuring VPN") - if err := SetupVPN(services.EdgeVPNDefaultInstance, cfg.APIAddress, "/", true, providerConfig); err != nil { - return ErrorEvent("Failed setup VPN: %s", err.Error()) + if !providerConfig.Kairos.Hybrid { + logger.Info("Configuring VPN") + if err := SetupVPN(services.EdgeVPNDefaultInstance, cfg.APIAddress, "/", true, providerConfig); err != nil { + return ErrorEvent("Failed setup VPN: %s", err.Error()) + } + + } else { + logger.Info("Configuring API") + if err := SetupAPI(cfg.APIAddress, "/", true, providerConfig); err != nil { + return ErrorEvent("Failed setup API: %s", err.Error()) + } + } networkID := "kairos" @@ -122,11 +131,11 @@ func Bootstrap(e *pluggable.Event) pluggable.EventResponse { service.WithRoles( service.RoleKey{ Role: "master", - RoleHandler: edgevpn.Master(c, providerConfig), + RoleHandler: p2p.Master(c, providerConfig), }, service.RoleKey{ Role: "worker", - RoleHandler: edgevpn.Worker(c, providerConfig), + RoleHandler: p2p.Worker(c, providerConfig), }, service.RoleKey{ Role: "auto", diff --git a/internal/provider/config/config.go b/internal/provider/config/config.go index 313f067..16d71ed 100644 --- a/internal/provider/config/config.go +++ 
b/internal/provider/config/config.go @@ -6,6 +6,7 @@ type Kairos struct { Role string `yaml:"role,omitempty"` DNS bool `yaml:"dns,omitempty"` LogLevel string `yaml:"loglevel,omitempty"` + Hybrid bool `yaml:"hybrid,omitempty"` } type Config struct { @@ -13,6 +14,14 @@ type Config struct { K3sAgent K3s `yaml:"k3s-agent,omitempty"` K3s K3s `yaml:"k3s,omitempty"` VPN map[string]string `yaml:"vpn,omitempty"` + KubeVIP KubeVIP `yaml:"kubevip,omitempty"` +} + +type KubeVIP struct { + Args []string `yaml:"args,omitempty"` + EIP string `yaml:"eip,omitempty"` + ManifestURL string `yaml:"manifest_url,omitempty"` + Interface string `yaml:"interface,omitempty"` } type K3s struct { diff --git a/internal/provider/vpn.go b/internal/provider/p2p.go similarity index 69% rename from internal/provider/vpn.go rename to internal/provider/p2p.go index 29dc4f4..7a7f91c 100644 --- a/internal/provider/vpn.go +++ b/internal/provider/p2p.go @@ -30,6 +30,52 @@ func SaveOEMCloudConfig(name string, yc yip.YipConfig) error { func SaveCloudConfig(name string, c []byte) error { return ioutil.WriteFile(filepath.Join("oem", fmt.Sprintf("%s.yaml", name)), c, 0700) } + +func SetupAPI(apiAddress, rootDir string, start bool, c *providerConfig.Config) error { + if c.Kairos == nil || c.Kairos.NetworkToken == "" { + return fmt.Errorf("no network token defined") + } + + svc, err := services.P2PAPI(rootDir) + if err != nil { + return fmt.Errorf("could not create svc: %w", err) + } + + apiAddress = strings.ReplaceAll(apiAddress, "https://", "") + apiAddress = strings.ReplaceAll(apiAddress, "http://", "") + + vpnOpts := map[string]string{ + "EDGEVPNTOKEN": c.Kairos.NetworkToken, + "APILISTEN": apiAddress, + } + // Override opts with user-supplied + for k, v := range c.VPN { + vpnOpts[k] = v + } + + os.MkdirAll("/etc/systemd/system.conf.d/", 0600) //nolint:errcheck + // Setup edgevpn instance + err = utils.WriteEnv(filepath.Join(rootDir, "/etc/systemd/system.conf.d/edgevpn-kairos.env"), vpnOpts) + if err != nil 
{ + return fmt.Errorf("could not create write env file: %w", err) + } + + err = svc.WriteUnit() + if err != nil { + return fmt.Errorf("could not create write unit file: %w", err) + } + + if start { + err = svc.Start() + if err != nil { + return fmt.Errorf("could not start svc: %w", err) + } + + return svc.Enable() + } + return nil +} + func SetupVPN(instance, apiAddress, rootDir string, start bool, c *providerConfig.Config) error { if c.Kairos == nil || c.Kairos.NetworkToken == "" { diff --git a/internal/role/common.go b/internal/role/common.go index dc08b94..4a70d6b 100644 --- a/internal/role/common.go +++ b/internal/role/common.go @@ -20,18 +20,15 @@ func CreateSentinel() error { return ioutil.WriteFile("/usr/local/.kairos/deployed", []byte{}, os.ModePerm) } -func getRoles(client *service.Client, nodes []string) ([]string, map[string]string, bool) { +func getRoles(client *service.Client, nodes []string) ([]string, map[string]string) { unassignedNodes := []string{} currentRoles := map[string]string{} - existsMaster := false for _, a := range nodes { role, _ := client.Get("role", a) currentRoles[a] = role - if role == "master" { - existsMaster = true - } else if role == "" { + if role == "" { unassignedNodes = append(unassignedNodes, a) } } - return unassignedNodes, currentRoles, existsMaster + return unassignedNodes, currentRoles } diff --git a/internal/role/p2p/common.go b/internal/role/p2p/common.go new file mode 100644 index 0000000..4f21332 --- /dev/null +++ b/internal/role/p2p/common.go @@ -0,0 +1,25 @@ +package role + +import ( + "fmt" + "net" + + providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" +) + +func guessInterface(pconfig *providerConfig.Config) string { + if pconfig.KubeVIP.Interface != "" { + return pconfig.KubeVIP.Interface + } + ifaces, err := net.Interfaces() + if err != nil { + fmt.Println("failed getting system interfaces") + return "" + } + for _, i := range ifaces { + if i.Name != "lo" { + return i.Name + } + } + 
return "" +} diff --git a/internal/role/p2p/kubevip.go b/internal/role/p2p/kubevip.go new file mode 100644 index 0000000..3337cd9 --- /dev/null +++ b/internal/role/p2p/kubevip.go @@ -0,0 +1,74 @@ +package role + +import ( + "fmt" + "io" + "log" + "net/http" + "os" + "strings" + + "github.com/kairos-io/kairos/pkg/utils" + providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" +) + +func generateKubeVIP(iface, ip string, args []string) (string, error) { + out, err := utils.SH(fmt.Sprintf("kube-vip manifest daemonset --interface %s --address %s --inCluster --taint --controlplane --arp --leaderElection %s", iface, ip, strings.Join(args, " "))) + + if err != nil { + return "", fmt.Errorf("error: %w - %s", err, out) + } + + return out, nil +} + +func downloadFromUrl(url, where string) error { + output, err := os.Create(where) + if err != nil { + return err + } + defer output.Close() + + response, err := http.Get(url) + if err != nil { + return err + + } + defer response.Body.Close() + + _, err = io.Copy(output, response.Body) + return err +} + +func deployKubeVIP(iface, ip string, pconfig *providerConfig.Config) error { + os.MkdirAll("/var/lib/rancher/k3s/server/manifests/", 0650) + + targetFile := "/var/lib/rancher/k3s/server/manifests/kubevip.yaml" + targetCRDFile := "/var/lib/rancher/k3s/server/manifests/kubevipmanifest.yaml" + + manifestUrl := "https://kube-vip.io/manifests/rbac.yaml" + + if pconfig.KubeVIP.ManifestURL != "" { + manifestUrl = pconfig.KubeVIP.ManifestURL + } + + err := downloadFromUrl(manifestUrl, targetCRDFile) + if err != nil { + return err + } + + content, err := generateKubeVIP(iface, ip, pconfig.KubeVIP.Args) + if err == nil { + f, err := os.OpenFile(targetFile, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Println(err) + } + defer f.Close() + if _, err := f.WriteString("\n" + content); err != nil { + log.Println(err) + } + } + + return err +} diff --git a/internal/role/edgevpn/master.go 
b/internal/role/p2p/master.go similarity index 87% rename from internal/role/edgevpn/master.go rename to internal/role/p2p/master.go index c4c75fb..d9ea410 100644 --- a/internal/role/edgevpn/master.go +++ b/internal/role/p2p/master.go @@ -71,9 +71,16 @@ func propagateMasterData(ip string, c *service.RoleConfig) error { func Master(cc *config.Config, pconfig *providerConfig.Config) role.Role { return func(c *service.RoleConfig) error { - ip := utils.GetInterfaceIP("edgevpn0") - if ip == "" { - return errors.New("node doesn't have an ip yet") + var ip string + iface := guessInterface(pconfig) + ifaceIP := utils.GetInterfaceIP(iface) + if pconfig.Kairos.Hybrid { + ip = pconfig.KubeVIP.EIP + } else { + ip = utils.GetInterfaceIP("edgevpn0") + if ip == "" { + return errors.New("node doesn't have an ip yet") + } } if pconfig.Kairos.Role != "" { @@ -118,7 +125,14 @@ func Master(cc *config.Config, pconfig *providerConfig.Config) role.Role { return err } - args := []string{"--flannel-iface=edgevpn0"} + var args []string + if pconfig.Kairos.Hybrid { + args = []string{fmt.Sprintf("--tls-san=%s", ip), fmt.Sprintf("--node-ip=%s", ifaceIP)} + deployKubeVIP(iface, ip, pconfig) + } else { + args = []string{"--flannel-iface=edgevpn0"} + } + if k3sConfig.ReplaceArgs { args = k3sConfig.Args } else { diff --git a/internal/role/edgevpn/worker.go b/internal/role/p2p/worker.go similarity index 100% rename from internal/role/edgevpn/worker.go rename to internal/role/p2p/worker.go diff --git a/internal/role/schedule.go b/internal/role/schedule.go index 00d1aa2..d419ff2 100644 --- a/internal/role/schedule.go +++ b/internal/role/schedule.go @@ -10,18 +10,72 @@ import ( service "github.com/mudler/edgevpn/api/client/service" ) +// func assignIPs(cidr string, client *service.Client, nodes []string, pconfig *providerConfig.Config) (error, bool) { +// address, _, err := net.ParseCIDR(cidr) +// if err != nil { +// return err, false +// } + +// currentIPS := []string{} +// toAssign := []string{} +// 
for _, a := range nodes { +// ip, _ := client.Get("ip", a) +// if ip != "" { +// currentIPS = append(currentIPS, ip) +// } else { +// toAssign = append(toAssign, a) +// } +// } + +// if len(toAssign) == 0 { +// return nil, false +// } + +// ip := utils.NextIP(address.String(), currentIPS) +// if err := client.Set("ip", toAssign[0], ip); err != nil { +// return err, false +// } + +// return nil, len(toAssign) != 0 +// } + // scheduleRoles assigns roles to nodes. Meant to be called only by leaders // TODO: HA-Auto. func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pconfig *providerConfig.Config) error { rand.Seed(time.Now().Unix()) // Assign roles to nodes - unassignedNodes, currentRoles, existsMaster := getRoles(c.Client, nodes) - + unassignedNodes, currentRoles := getRoles(c.Client, nodes) c.Logger.Infof("I'm the leader. My UUID is: %s.\n Current assigned roles: %+v", c.UUID, currentRoles) + + existsMaster := false + + masterRole := "master" + workerRole := "worker" + + if pconfig.Kairos.Hybrid { + // err, reschedule := assignIPs(pconfig.KubeVIP.CIDR, c.Client, nodes, pconfig) + // if reschedule { + // return fmt.Errorf("asked to reschedule") + // } + // if err != nil { + // return err + // } + // ip, _ := c.Client.Get("ip", c.UUID) + // c.Logger.Infof("KubeVIP IP: %+v", ip) + c.Logger.Info("hybrid p2p enabled") + // masterRole = "kubevip/master" + // workerRole = "kubevip/worker" + } + + for _, r := range currentRoles { + if r == masterRole { + existsMaster = true + } + } c.Logger.Infof("Master already present: %t", existsMaster) c.Logger.Infof("Unassigned nodes: %+v", unassignedNodes) - + selectedMaster := "" if !existsMaster && len(unassignedNodes) > 0 { var selected string toSelect := unassignedNodes @@ -43,11 +97,12 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco selected = toSelect[rand.Intn(len(toSelect)-1)] } - if err := c.Client.Set("role", selected, "master"); err != nil { + if err := 
c.Client.Set("role", selected, masterRole); err != nil { return err } c.Logger.Info("-> Set master to", selected) - currentRoles[selected] = "master" + currentRoles[selected] = masterRole + selectedMaster = selected // Return here, so next time we get called // makes sure master is set. return nil @@ -55,11 +110,14 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco // cycle all empty roles and assign worker roles for _, uuid := range unassignedNodes { - if err := c.Client.Set("role", uuid, "worker"); err != nil { + if selectedMaster == uuid { + continue + } + if err := c.Client.Set("role", uuid, workerRole); err != nil { c.Logger.Error(err) return err } - c.Logger.Info("-> Set worker to", uuid) + c.Logger.Infof("-> Set %s to %s", workerRole, uuid) } c.Logger.Info("Done scheduling") diff --git a/internal/services/edgevpn.go b/internal/services/edgevpn.go index c96ab5d..ff08a89 100644 --- a/internal/services/edgevpn.go +++ b/internal/services/edgevpn.go @@ -26,6 +26,35 @@ if [ -f /etc/environment ]; then source /etc/environment; fi if [ -f /etc/systemd/system.conf.d/edgevpn-kairos.env ]; then source /etc/systemd/system.conf.d/edgevpn-kairos.env; fi set +o allexport` +const edgevpnAPIOpenRC string = `#!/sbin/openrc-run + +depend() { + after net + provide edgevpn +} + +supervisor=supervise-daemon +name="edgevpn" +command="edgevpn api --enable-healthchecks" +supervise_daemon_args="--stdout /var/log/edgevpn.log --stderr /var/log/edgevpn.log" +pidfile="/run/edgevpn.pid" +respawn_delay=5 +set -o allexport +if [ -f /etc/environment ]; then source /etc/environment; fi +if [ -f /etc/systemd/system.conf.d/edgevpn-kairos.env ]; then source /etc/systemd/system.conf.d/edgevpn-kairos.env; fi +set +o allexport` + +const edgevpnAPISystemd string = `[Unit] +Description=P2P API Daemon +After=network.target +[Service] +EnvironmentFile=/etc/systemd/system.conf.d/edgevpn-kairos.env +LimitNOFILE=49152 +ExecStart=edgevpn api --enable-healthchecks 
+Restart=always +[Install] +WantedBy=multi-user.target` + const edgevpnSystemd string = `[Unit] Description=EdgeVPN Daemon After=network.target @@ -55,3 +84,19 @@ func EdgeVPN(instance, rootDir string) (machine.Service, error) { systemd.WithRoot(rootDir), ) } + +func P2PAPI(rootDir string) (machine.Service, error) { + if utils.IsOpenRCBased() { + return openrc.NewService( + openrc.WithName("edgevpn"), + openrc.WithUnitContent(edgevpnAPIOpenRC), + openrc.WithRoot(rootDir), + ) + } + + return systemd.NewService( + systemd.WithName("edgevpn"), + systemd.WithUnitContent(edgevpnAPISystemd), + systemd.WithRoot(rootDir), + ) +} From f67f2ce284ec855b0a0a5d080076d912896c5614 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 3 Dec 2022 18:41:41 +0100 Subject: [PATCH 3/8] :arrow_up: Bump kairos core Signed-off-by: Ettore Di Giacinto --- go.mod | 8 ++++---- go.sum | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index bb99160..81b1b1b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kairos-io/provider-kairos -go 1.19 +go 1.18 replace github.com/elastic/gosigar => github.com/mudler/gosigar v0.14.3-0.20220502202347-34be910bdaaf @@ -9,7 +9,7 @@ require ( github.com/gliderlabs/ssh v0.2.2 github.com/google/go-containerregistry v0.11.0 github.com/ipfs/go-log v1.0.5 - github.com/kairos-io/kairos v1.24.3-56.0.20221128123446-ab088839ec95 + github.com/kairos-io/kairos v1.3.1 github.com/mudler/edgevpn v0.15.3 github.com/mudler/go-nodepair v0.0.0-20220507212557-7d47aa3cc1f1 github.com/mudler/go-pluggable v0.0.0-20220716112424-189d463e3ff3 @@ -88,8 +88,8 @@ require ( github.com/ipfs/go-ipns v0.1.2 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/ipld/go-ipld-prime v0.16.0 // indirect - github.com/itchyny/gojq v0.12.9 // indirect - github.com/itchyny/timefmt-go v0.1.4 // indirect + github.com/itchyny/gojq v0.12.10 // indirect + github.com/itchyny/timefmt-go v0.1.5 // indirect 
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect diff --git a/go.sum b/go.sum index 3e559e7..d178b17 100644 --- a/go.sum +++ b/go.sum @@ -693,11 +693,11 @@ github.com/ipld/go-ipld-prime v0.16.0 h1:RS5hhjB/mcpeEPJvfyj0qbOj/QL+/j05heZ0qa9 github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/itchyny/gojq v0.12.8/go.mod h1:gE2kZ9fVRU0+JAksaTzjIlgnCa2akU+a1V0WXgJQN5c= -github.com/itchyny/gojq v0.12.9 h1:biKpbKwMxVYhCU1d6mR7qMr3f0Hn9F5k5YykCVb3gmM= -github.com/itchyny/gojq v0.12.9/go.mod h1:T4Ip7AETUXeGpD+436m+UEl3m3tokRgajd5pRfsR5oE= +github.com/itchyny/gojq v0.12.10 h1:6TcS0VYWS6wgntpF/4tnrzwdCMjiTxRAxIqZWfDsDQU= +github.com/itchyny/gojq v0.12.10/go.mod h1:o3FT8Gkbg/geT4pLI0tF3hvip5F3Y/uskjRz9OYa38g= github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= -github.com/itchyny/timefmt-go v0.1.4 h1:hFEfWVdwsEi+CY8xY2FtgWHGQaBaC3JeHd+cve0ynVM= -github.com/itchyny/timefmt-go v0.1.4/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= +github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= +github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -749,8 +749,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod 
h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kairos-io/kairos v1.24.3-56.0.20221128123446-ab088839ec95 h1:dSTM/gePf/Czu7eOKwvv3J91AWMxH/Y4xOGVQNMu+OY= -github.com/kairos-io/kairos v1.24.3-56.0.20221128123446-ab088839ec95/go.mod h1:Rgn/1YTvcTQIdtzvKT96FoMH8tdGiYI9tZwcu1CV1dI= +github.com/kairos-io/kairos v1.3.1 h1:t9q4FnBFxAlOU8yFU/s/df+hhJyZQKAHb4944jeItHs= +github.com/kairos-io/kairos v1.3.1/go.mod h1:HhMh4lGr5gCXRukRI1+y/P8ZwB1+VWYaUOMP1nRoa0o= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kbinani/screenshot v0.0.0-20210720154843-7d3a670d8329 h1:qq2nCpSrXrmvDGRxW0ruW9BVEV1CN2a9YDOExdt+U0o= github.com/kbinani/screenshot v0.0.0-20210720154843-7d3a670d8329/go.mod h1:2VPVQDR4wO7KXHwP+DAypEy67rXf+okUx2zjgpCxZw4= From 7317a8b87d6e0bbe12b6dc504cd7e52969792e45 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 2 Dec 2022 20:39:53 +0100 Subject: [PATCH 4/8] :arrow_up: Update repos Signed-off-by: Ettore Di Giacinto --- repository.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/repository.yaml b/repository.yaml index 7825767..e4e3fac 100644 --- a/repository.yaml +++ b/repository.yaml @@ -9,9 +9,9 @@ repositories: priority: 2 urls: - "quay.io/kairos/packages" - reference: 20221125103743-repository.yaml + reference: 20221202173038-repository.yaml - !!merge <<: *kairos arch: arm64 urls: - "quay.io/kairos/packages-arm64" - reference: 20221125104907-repository.yaml + reference: 20221202173723-repository.yaml From dd9127f7ce3c26810f2167570b605ddbcb4c59ea Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 5 Dec 2022 09:57:44 +0100 Subject: [PATCH 5/8] :seedling: Allow to set minimum number of node to wait for Signed-off-by: Ettore Di Giacinto --- internal/provider/config/config.go | 1 + internal/role/auto.go | 7 
++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/internal/provider/config/config.go b/internal/provider/config/config.go index 16d71ed..7873a49 100644 --- a/internal/provider/config/config.go +++ b/internal/provider/config/config.go @@ -7,6 +7,7 @@ type Kairos struct { DNS bool `yaml:"dns,omitempty"` LogLevel string `yaml:"loglevel,omitempty"` Hybrid bool `yaml:"hybrid,omitempty"` + MinimumNodes int `yaml:"minimum_nodes,omitempty"` } type Config struct { diff --git a/internal/role/auto.go b/internal/role/auto.go index 9ab4758..87930b1 100644 --- a/internal/role/auto.go +++ b/internal/role/auto.go @@ -23,10 +23,15 @@ func Auto(cc *config.Config, pconfig *providerConfig.Config) Role { advertizing, _ := c.Client.AdvertizingNodes() actives, _ := c.Client.ActiveNodes() + minimumNodes := pconfig.Kairos.MinimumNodes + if minimumNodes == 0 { + minimumNodes = 2 + } + c.Logger.Info("Active nodes:", actives) c.Logger.Info("Advertizing nodes:", advertizing) - if len(advertizing) < 2 { + if len(advertizing) < minimumNodes { c.Logger.Info("Not enough nodes") return nil } From c5b11bdc063d13986af333adbe1f81a7a027a546 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 5 Dec 2022 11:31:41 +0100 Subject: [PATCH 6/8] :seedling: Use static kubevip artifacts Signed-off-by: Ettore Di Giacinto --- internal/assets/assets.go | 18 ++++++++ internal/assets/static/kube_vip_rbac.yaml | 32 ++++++++++++++ internal/role/p2p/kubevip.go | 53 +++++++++++++++-------- 3 files changed, 84 insertions(+), 19 deletions(-) create mode 100644 internal/assets/assets.go create mode 100644 internal/assets/static/kube_vip_rbac.yaml diff --git a/internal/assets/assets.go b/internal/assets/assets.go new file mode 100644 index 0000000..b4dd0d7 --- /dev/null +++ b/internal/assets/assets.go @@ -0,0 +1,18 @@ +package assets + +import ( + "embed" + "io/fs" +) + +//go:embed static +var staticFiles embed.FS + +func GetStaticFS() fs.FS { + fsys, err := fs.Sub(staticFiles, "static") + if err 
!= nil { + panic(err) + } + + return fs.FS(fsys) +} diff --git a/internal/assets/static/kube_vip_rbac.yaml b/internal/assets/static/kube_vip_rbac.yaml new file mode 100644 index 0000000..91d6f28 --- /dev/null +++ b/internal/assets/static/kube_vip_rbac.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-vip + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:kube-vip-role +rules: + - apiGroups: [""] + resources: ["services", "services/status", "nodes", "endpoints"] + verbs: ["list","get","watch", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "get", "watch", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-vip-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-vip-role +subjects: +- kind: ServiceAccount + name: kube-vip + namespace: kube-system \ No newline at end of file diff --git a/internal/role/p2p/kubevip.go b/internal/role/p2p/kubevip.go index 3337cd9..550d183 100644 --- a/internal/role/p2p/kubevip.go +++ b/internal/role/p2p/kubevip.go @@ -3,12 +3,12 @@ package role import ( "fmt" "io" - "log" "net/http" "os" "strings" "github.com/kairos-io/kairos/pkg/utils" + "github.com/kairos-io/provider-kairos/internal/assets" providerConfig "github.com/kairos-io/provider-kairos/internal/provider/config" ) @@ -41,33 +41,48 @@ func downloadFromUrl(url, where string) error { } func deployKubeVIP(iface, ip string, pconfig *providerConfig.Config) error { - os.MkdirAll("/var/lib/rancher/k3s/server/manifests/", 0650) + if err := os.MkdirAll("/var/lib/rancher/k3s/server/manifests/", 0650); err != nil { + return fmt.Errorf("could not create manifest dir") + } targetFile := "/var/lib/rancher/k3s/server/manifests/kubevip.yaml" targetCRDFile := 
"/var/lib/rancher/k3s/server/manifests/kubevipmanifest.yaml" - manifestUrl := "https://kube-vip.io/manifests/rbac.yaml" - if pconfig.KubeVIP.ManifestURL != "" { - manifestUrl = pconfig.KubeVIP.ManifestURL - } + err := downloadFromUrl(pconfig.KubeVIP.ManifestURL, targetCRDFile) + if err != nil { + return err + } + } else { + f, err := assets.GetStaticFS().Open("kube_vip_rbac.yaml") + if err != nil { + return fmt.Errorf("could not find kube_vip in assets") + } + defer f.Close() - err := downloadFromUrl(manifestUrl, targetCRDFile) - if err != nil { - return err + destination, err := os.Create(targetCRDFile) + if err != nil { + return err + } + defer destination.Close() + _, err = io.Copy(destination, f) + if err != nil { + return err + } } content, err := generateKubeVIP(iface, ip, pconfig.KubeVIP.Args) - if err == nil { - f, err := os.OpenFile(targetFile, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - log.Println(err) - } - defer f.Close() - if _, err := f.WriteString("\n" + content); err != nil { - log.Println(err) - } + if err != nil { + return fmt.Errorf("could not generate kubevip %s", err.Error()) + } + + f, err := os.Create(targetFile) + if err != nil { + return fmt.Errorf("could not open %s: %w", f.Name(), err) + } + defer f.Close() + if _, err := f.WriteString("\n" + content); err != nil { + return fmt.Errorf("could not write to %s: %w", f.Name(), err) } return err From 955e8df55e5518db4e5e435910e2c94cc10b9773 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 5 Dec 2022 11:37:53 +0100 Subject: [PATCH 7/8] :seedling: Cleanups Signed-off-by: Ettore Di Giacinto --- internal/role/p2p/kubevip.go | 8 +++--- internal/role/p2p/master.go | 4 ++- internal/role/schedule.go | 48 ++---------------------------------- 3 files changed, 9 insertions(+), 51 deletions(-) diff --git a/internal/role/p2p/kubevip.go b/internal/role/p2p/kubevip.go index 550d183..679bfc7 100644 --- a/internal/role/p2p/kubevip.go +++ b/internal/role/p2p/kubevip.go @@ -22,7 
+22,7 @@ func generateKubeVIP(iface, ip string, args []string) (string, error) { return out, nil } -func downloadFromUrl(url, where string) error { +func downloadFromURL(url, where string) error { output, err := os.Create(where) if err != nil { return err @@ -49,7 +49,7 @@ func deployKubeVIP(iface, ip string, pconfig *providerConfig.Config) error { targetCRDFile := "/var/lib/rancher/k3s/server/manifests/kubevipmanifest.yaml" if pconfig.KubeVIP.ManifestURL != "" { - err := downloadFromUrl(pconfig.KubeVIP.ManifestURL, targetCRDFile) + err := downloadFromURL(pconfig.KubeVIP.ManifestURL, targetCRDFile) if err != nil { return err } @@ -81,9 +81,9 @@ func deployKubeVIP(iface, ip string, pconfig *providerConfig.Config) error { return fmt.Errorf("could not open %s: %w", f.Name(), err) } defer f.Close() - if _, err := f.WriteString("\n" + content); err != nil { + if _, err := f.WriteString(content); err != nil { return fmt.Errorf("could not write to %s: %w", f.Name(), err) } - return err + return nil } diff --git a/internal/role/p2p/master.go b/internal/role/p2p/master.go index d9ea410..bd12fe2 100644 --- a/internal/role/p2p/master.go +++ b/internal/role/p2p/master.go @@ -128,7 +128,9 @@ func Master(cc *config.Config, pconfig *providerConfig.Config) role.Role { var args []string if pconfig.Kairos.Hybrid { args = []string{fmt.Sprintf("--tls-san=%s", ip), fmt.Sprintf("--node-ip=%s", ifaceIP)} - deployKubeVIP(iface, ip, pconfig) + if err := deployKubeVIP(iface, ip, pconfig); err != nil { + return fmt.Errorf("failed KubeVIP setup: %w", err) + } } else { args = []string{"--flannel-iface=edgevpn0"} } diff --git a/internal/role/schedule.go b/internal/role/schedule.go index d419ff2..3a7214c 100644 --- a/internal/role/schedule.go +++ b/internal/role/schedule.go @@ -10,35 +10,6 @@ import ( service "github.com/mudler/edgevpn/api/client/service" ) -// func assignIPs(cidr string, client *service.Client, nodes []string, pconfig *providerConfig.Config) (error, bool) { -// address, _, err 
:= net.ParseCIDR(cidr) -// if err != nil { -// return err, false -// } - -// currentIPS := []string{} -// toAssign := []string{} -// for _, a := range nodes { -// ip, _ := client.Get("ip", a) -// if ip != "" { -// currentIPS = append(currentIPS, ip) -// } else { -// toAssign = append(toAssign, a) -// } -// } - -// if len(toAssign) == 0 { -// return nil, false -// } - -// ip := utils.NextIP(address.String(), currentIPS) -// if err := client.Set("ip", toAssign[0], ip); err != nil { -// return err, false -// } - -// return nil, len(toAssign) != 0 -// } - // scheduleRoles assigns roles to nodes. Meant to be called only by leaders // TODO: HA-Auto. func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pconfig *providerConfig.Config) error { @@ -54,18 +25,7 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco workerRole := "worker" if pconfig.Kairos.Hybrid { - // err, reschedule := assignIPs(pconfig.KubeVIP.CIDR, c.Client, nodes, pconfig) - // if reschedule { - // return fmt.Errorf("asked to reschedule") - // } - // if err != nil { - // return err - // } - // ip, _ := c.Client.Get("ip", c.UUID) - // c.Logger.Infof("KubeVIP IP: %+v", ip) - c.Logger.Info("hybrid p2p enabled") - // masterRole = "kubevip/master" - // workerRole = "kubevip/worker" + c.Logger.Info("hybrid p2p with KubeVIP enabled") } for _, r := range currentRoles { @@ -75,7 +35,7 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco } c.Logger.Infof("Master already present: %t", existsMaster) c.Logger.Infof("Unassigned nodes: %+v", unassignedNodes) - selectedMaster := "" + if !existsMaster && len(unassignedNodes) > 0 { var selected string toSelect := unassignedNodes @@ -102,7 +62,6 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco } c.Logger.Info("-> Set master to", selected) currentRoles[selected] = masterRole - selectedMaster = selected // Return here, so next time we get called // makes 
sure master is set. return nil @@ -110,9 +69,6 @@ func scheduleRoles(nodes []string, c *service.RoleConfig, cc *config.Config, pco // cycle all empty roles and assign worker roles for _, uuid := range unassignedNodes { - if selectedMaster == uuid { - continue - } if err := c.Client.Set("role", uuid, workerRole); err != nil { c.Logger.Error(err) return err From 18a0b14a51d8fa906cded04914431e39ef3747f7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 5 Dec 2022 13:32:05 +0100 Subject: [PATCH 8/8] :seedling: Configure worker Signed-off-by: Ettore Di Giacinto --- internal/role/p2p/worker.go | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/internal/role/p2p/worker.go b/internal/role/p2p/worker.go index 3665eda..4deb2c5 100644 --- a/internal/role/p2p/worker.go +++ b/internal/role/p2p/worker.go @@ -44,13 +44,6 @@ func Worker(cc *config.Config, pconfig *providerConfig.Config) role.Role { nodeToken = strings.TrimRight(nodeToken, "\n") - ip := utils.GetInterfaceIP("edgevpn0") - if ip == "" { - return errors.New("node doesn't have an ip yet") - } - - c.Logger.Info("Configuring k3s-agent", ip, masterIP, nodeToken) - svc, err := machine.K3sAgent() if err != nil { return err @@ -75,6 +68,26 @@ func Worker(cc *config.Config, pconfig *providerConfig.Config) role.Role { env = k3sConfig.Env } + args := []string{ + "--with-node-id", + } + + if pconfig.Kairos.Hybrid { + iface := guessInterface(pconfig) + ip := utils.GetInterfaceIP(iface) + args = append(args, + fmt.Sprintf("--node-ip %s", ip)) + } else { + ip := utils.GetInterfaceIP("edgevpn0") + if ip == "" { + return errors.New("node doesn't have an ip yet") + } + args = append(args, + fmt.Sprintf("--node-ip %s", ip), + "--flannel-iface=edgevpn0") + } + + c.Logger.Info("Configuring k3s-agent", masterIP, nodeToken, args) // Setup systemd unit and starts it if err := utils.WriteEnv(machine.K3sEnvUnit("k3s-agent"), env, @@ -82,11 +95,6 @@ func Worker(cc *config.Config, 
pconfig *providerConfig.Config) role.Role { return err } - args := []string{ - "--with-node-id", - fmt.Sprintf("--node-ip %s", ip), - "--flannel-iface=edgevpn0", - } if k3sConfig.ReplaceArgs { args = k3sConfig.Args } else {