Compare commits

..

32 Commits

Author SHA1 Message Date
Dimitris Karakasilis
e0138fe609 Merge pull request #45 from kairos-io/2069-mdns-kms
Do an mdns lookup when KMS url ends in .local
2024-01-25 15:58:08 +02:00
Dimitris Karakasilis
fe5d338ed5 Use renovate to bump the base image for the iso
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 15:15:40 +02:00
Dimitris Karakasilis
d708fcfa26 Skip test that is not ready yet
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 12:40:14 +02:00
Dimitris Karakasilis
2e63d50125 Change test expectation
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 12:38:41 +02:00
Dimitris Karakasilis
d4e8b2adc2 Add neednet grub setting to mdns notes (it's needed)
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 12:36:58 +02:00
Dimitris Karakasilis
10dcecdc85 Allow test to expect failed installation
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 10:27:02 +02:00
Dimitris Karakasilis
3c4663afa5 Fix problem when MACHINE_SPICY is not set
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 09:57:44 +02:00
Dimitris Karakasilis
95a352f4b4 Implement a test for discoverable KMS
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-25 09:39:17 +02:00
Dimitris Karakasilis
fbfd7c9f07 Add TODO for test
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-23 18:55:10 +02:00
Dimitris Karakasilis
7d84c01663 Fix tests
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-23 17:25:46 +02:00
Dimitris Karakasilis
311b8adda0 Migrate mdns functions from tpm helpers to this repo
because tpm has nothing to do with mdns.

TODO: Remove the functions from tpm helpers and bump the module here

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2024-01-23 12:53:44 +02:00
Dimitris Karakasilis
bf59ecd475 Bump tpm-helpers
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2023-12-20 14:56:32 +02:00
Dimitris Karakasilis
71e90b94aa Remove instructions that don't work after rebase
Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2023-12-20 14:47:19 +02:00
Dimitris Karakasilis
3d2d2de9dc Do an mdns lookup when KMS url ends in .local
Part of: https://github.com/kairos-io/kairos/issues/2069

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2023-12-20 14:40:23 +02:00
renovate[bot]
c42e66a9de Update module github.com/kairos-io/kairos-sdk to v0.0.15 2023-10-27 12:39:28 +00:00
Dimitris Karakasilis
da93e626c5 Merge pull request #43 from kairos-io/1836-more-logs
1836 more logs
2023-10-27 09:20:27 +03:00
Dimitris Karakasilis
ecbbe1499e Add more logs and refactor the server handlers
- Flatten if/else logic by handling errors and returning early
- Use different logger for server logs. Also handle skipped errors.
- Remove unnecessary for loop
- --zap-log-level can already be used (and it works)
- Remove non-existent enki flag
- Run tests with KVM enabled on self-hosted runners
  and also don't add grub.cfg since it's already there in the base image
- Remove non-used earthly target

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
2023-10-27 09:17:48 +03:00
Mauro Morales
09981d750e Configure automerge for patch updates 2023-10-04 16:56:52 +02:00
Itxaka
b5b4d0d042 🤖 Add concurrency to CI (#33) 2023-06-15 11:20:19 +02:00
renovate[bot]
8420155746 Update docker/build-push-action action to v4 (#32)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:15:24 +02:00
renovate[bot]
a9359bf713 Update actions/checkout action to v3 (#31)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:15:01 +02:00
renovate[bot]
b31467e925 Update module github.com/mudler/yip to v1.2.0 (#28)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:14:48 +02:00
renovate[bot]
c5dc8db56b Update module github.com/jaypipes/ghw to v0.11.0 (#26)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:10:42 +02:00
renovate[bot]
a80703a556 Update module github.com/onsi/gomega to v1.27.8 (#24)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:10:22 +02:00
renovate[bot]
0b6f771d32 Update module github.com/kairos-io/kcrypt to v0.7.0 (#27)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:08:46 +02:00
renovate[bot]
72dd7d3e50 Update module github.com/onsi/ginkgo/v2 to v2.10.0 (#29)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-15 11:08:11 +02:00
Itxaka
0619047a20 Drop kairos and use sdk for collector (#20) 2023-06-15 09:35:01 +02:00
renovate[bot]
715664969a Add renovate.json (#6)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Itxaka <itxaka.garcia@spectrocloud.com>
2023-06-14 14:38:01 +02:00
Mauro Morales
bcda5b5b38 Update issue templates
relates to https://github.com/kairos-io/kairos/issues/1483
2023-06-13 12:17:08 +02:00
Itxaka
b2a0330dd8 Fix lint
Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>
2023-05-10 00:31:15 +02:00
Itxaka
0b68d90081 Bump ghw and fix label (#17)
* Bump ghw and fix label

old label was the new FilesystemLabel. Now the label refers to the
partition label which is different

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* bump deps

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* Rework ginkgo

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* docker login

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* [Will drop]Allow building kcrypt from branches

Otherwise any changes that need both won't pass tests.

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* Dont build the iso 5 times

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* This confirms Im dumb and dont know how to program

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* debug logs

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* debug

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* 🤖 run in github CI

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* Debug

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* debug

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

* Add /tmp/oem to scan dirs for config

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>

---------

Signed-off-by: Itxaka <itxaka.garcia@spectrocloud.com>
2023-05-10 00:24:58 +02:00
Itxaka
40267d4c24 Merge pull request #13 from kairos-io/bump-go-version-to-1.20.2
⬆️ Bump go to 1.20
2023-03-30 21:37:09 +02:00
22 changed files with 1159 additions and 854 deletions

View File

@@ -0,0 +1,12 @@
---
name: File issues on main Kairos repo
about: Tell users to file their issues on the main Kairos repo
title: ''
labels: ''
assignees: ''
---
:warning: All Kairos issues are tracked in our main repo, please file your issue there, thanks! :warning:
https://github.com/kairos-io/kairos/issues

View File

@@ -9,8 +9,55 @@ on:
paths-ignore:
- 'README.md'
concurrency:
group: ci-e2e-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
build-iso:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: ^1.20
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_LOGIN }}
password: ${{ secrets.DOCKER_PASS }}
- name: Install earthly
uses: Luet-lab/luet-install-action@v1
with:
repository: quay.io/kairos/packages
packages: utils/earthly
- name: build iso
run: |
# Configure earthly to use the docker mirror in CI
# https://docs.earthly.dev/ci-integration/pull-through-cache#configuring-earthly-to-use-the-cache
mkdir -p ~/.earthly/
cat << EOF > ~/.earthly/config.yml
global:
buildkit_additional_config: |
[registry."docker.io"]
mirrors = ["registry.docker-mirror.svc.cluster.local:5000"]
[registry."registry.docker-mirror.svc.cluster.local:5000"]
insecure = true
EOF
earthly -P +iso
- uses: actions/upload-artifact@v3
with:
name: challenger.iso.zip
path: |
build/*.iso
e2e-tests:
needs:
- build-iso
runs-on: self-hosted
strategy:
fail-fast: false
@@ -21,6 +68,7 @@ jobs:
- label: "remote-static"
- label: "remote-https-pinned"
- label: "remote-https-bad-cert"
- label: "discoverable-kms"
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -30,6 +78,23 @@ jobs:
uses: actions/setup-go@v4
with:
go-version: ^1.20
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_LOGIN }}
password: ${{ secrets.DOCKER_PASS }}
- name: Install deps
run: |
curl -L https://github.com/mudler/luet/releases/download/0.33.0/luet-0.33.0-linux-amd64 -o luet
chmod +x luet
sudo mv luet /usr/bin/luet
sudo mkdir -p /etc/luet/repos.conf.d/
sudo luet repo add -y kairos --url quay.io/kairos/packages --type docker
LUET_NOLOCK=true sudo -E luet install -y container/kubectl utils/k3d utils/earthly
- name: Download artifacts
uses: actions/download-artifact@v3
with:
name: challenger.iso.zip
- name: Run tests
env:
LABEL: ${{ matrix.label }}
@@ -39,32 +104,13 @@ jobs:
sudo apt install -y git qemu-system-x86 qemu-utils swtpm jq make glibc-tools \
openssl curl gettext ca-certificates curl gnupg lsb-release
curl -L https://github.com/mudler/luet/releases/download/0.33.0/luet-0.33.0-linux-amd64 -o luet
chmod +x luet
sudo mv luet /usr/bin/luet
sudo mkdir -p /etc/luet/repos.conf.d/
sudo luet repo add -y kairos --url quay.io/kairos/packages --type docker
LUET_NOLOCK=true sudo -E luet install -y container/kubectl utils/k3d utils/earthly
earthly -P +iso
export ISO=$PWD/build/challenger.iso
go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
go get github.com/onsi/gomega/...
go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.7.1
go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.7.1
go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.7.1
# Configure earthly to use the docker mirror in CI
# https://docs.earthly.dev/ci-integration/pull-through-cache#configuring-earthly-to-use-the-cache
cat << EOF > ~/.earthly/config.yml
global:
buildkit_additional_config: |
[registry."docker.io"]
mirrors = ["registry.docker-mirror.svc.cluster.local:5000"]
[registry."registry.docker-mirror.svc.cluster.local:5000"]
insecure = true
EOF
export ISO=$PWD/$(ls *.iso)
# We run with sudo to be able to access /dev/kvm
sudo -E ./scripts/e2e-tests.sh
./scripts/e2e-tests.sh
- uses: actions/upload-artifact@v3
if: failure()
with:
name: ${{ matrix.label }}-test.logs.zip
path: tests/**/logs/*
if-no-files-found: warn

View File

@@ -8,12 +8,17 @@ on:
tags:
- '*'
concurrency:
group: ci-image-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Prepare
id: prep
@@ -53,7 +58,7 @@ jobs:
password: ${{ secrets.QUAY_PASSWORD }}
- name: Build
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: .

View File

@@ -6,6 +6,12 @@ on:
pull_request:
paths:
- '**'
concurrency:
group: ci-lint-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
env:
FORCE_COLOR: 1
jobs:

View File

@@ -6,6 +6,11 @@ on:
- master
pull_request:
concurrency:
group: ci-unit-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
unit-tests:
runs-on: ubuntu-latest

View File

@@ -1,5 +1,9 @@
VERSION 0.6
ARG BASE_IMAGE=quay.io/kairos/core-ubuntu:latest
# renovate: datasource=github-releases depName=kairos-io/kairos
ARG KAIROS_VERSION="v2.5.0"
ARG BASE_IMAGE=quay.io/kairos/ubuntu:23.10-core-amd64-generic-$KAIROS_VERSION
ARG OSBUILDER_IMAGE=quay.io/kairos/osbuilder-tools
# renovate: datasource=docker depName=golang
ARG GO_VERSION=1.20
@@ -10,12 +14,11 @@ build-challenger:
COPY . /work
WORKDIR /work
RUN CGO_ENABLED=0 go build -o kcrypt-discovery-challenger ./cmd/discovery
SAVE ARTIFACT /work/kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
SAVE ARTIFACT /work/kcrypt-discovery-challenger kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
image:
FROM github.com/Itxaka/kairos:drop_kcrypt_dracut+image
FROM $BASE_IMAGE
ARG IMAGE
RUN cat /etc/os-release
COPY +build-challenger/kcrypt-discovery-challenger /system/discovery/kcrypt-discovery-challenger
SAVE IMAGE $IMAGE
@@ -23,20 +26,13 @@ image-rootfs:
FROM +image
SAVE ARTIFACT --keep-own /. rootfs
grub-files:
FROM alpine
RUN apk add wget
RUN wget https://raw.githubusercontent.com/c3os-io/c3os/master/overlay/files-iso/boot/grub2/grub.cfg -O grub.cfg
SAVE ARTIFACT --keep-own grub.cfg grub.cfg
iso:
ARG OSBUILDER_IMAGE
ARG ISO_NAME=challenger
FROM $OSBUILDER_IMAGE
WORKDIR /build
COPY --keep-own +grub-files/grub.cfg /build/files-iso/boot/grub2/grub.cfg
COPY --keep-own +image-rootfs/rootfs /build/rootfs
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --squash-no-compression --date=false --local --overlay-iso /build/files-iso --output /build/ dir:/build/rootfs
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --squash-no-compression --date=false --output /build/ dir:/build/rootfs
SAVE ARTIFACT /build/$ISO_NAME.iso kairos.iso AS LOCAL build/$ISO_NAME.iso
SAVE ARTIFACT /build/$ISO_NAME.iso.sha256 kairos.iso.sha256 AS LOCAL build/$ISO_NAME.iso.sha256
@@ -51,14 +47,8 @@ test:
COPY go.mod go.sum ./
RUN go mod download && go mod verify
RUN go get github.com/onsi/gomega/...
RUN go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.1.4
RUN go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.1.4
RUN go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.1.4
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
COPY . /work
RUN PATH=$PATH:$GOPATH/bin ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
RUN go run github.com/onsi/ginkgo/v2/ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
SAVE ARTIFACT coverage.out AS LOCAL coverage.out
# Generic targets
@@ -88,12 +78,6 @@ e2e-tests-image:
COPY . /test
WORKDIR /test
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
RUN go get github.com/onsi/gomega/...
RUN go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.7.1
RUN go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.7.1
RUN go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.7.1
IF [ -e /test/build/kairos.iso ]
ENV ISO=/test/build/kairos.iso
ELSE

View File

@@ -16,6 +16,9 @@ import (
"github.com/mudler/yip/pkg/utils"
)
// Because of how go-pluggable works, we can't just print to stdout
const LOGFILE = "/tmp/kcrypt-challenger-client.log"
var errPartNotFound error = fmt.Errorf("pass for partition not found")
var errBadCertificate error = fmt.Errorf("unknown certificate")
@@ -30,6 +33,10 @@ func NewClient() (*Client, error) {
// echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
func (c *Client) Start() error {
if err := os.RemoveAll(LOGFILE); err != nil { // Start fresh
return fmt.Errorf("removing the logfile: %w", err)
}
factory := pluggable.NewPluginFactory()
// Input: bus.EventInstallPayload
@@ -59,7 +66,7 @@ func (c *Client) Start() error {
return factory.Run(pluggable.EventType(os.Args[1]), os.Stdin, os.Stdout)
}
func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
func (c *Client) generatePass(postEndpoint string, headers map[string]string, p *block.Partition) error {
rand := utils.RandomString(32)
pass, err := tpm.EncryptBlob([]byte(rand))
@@ -71,10 +78,14 @@ func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
opts := []tpm.Option{
tpm.WithCAs([]byte(c.Config.Kcrypt.Challenger.Certificate)),
tpm.AppendCustomCAToSystemCA,
tpm.WithAdditionalHeader("label", p.Label),
tpm.WithAdditionalHeader("label", p.FilesystemLabel),
tpm.WithAdditionalHeader("name", p.Name),
tpm.WithAdditionalHeader("uuid", p.UUID),
}
for k, v := range headers {
opts = append(opts, tpm.WithAdditionalHeader(k, v))
}
conn, err := tpm.Connection(postEndpoint, opts...)
if err != nil {
return err
@@ -84,20 +95,27 @@ func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
}
func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err error) {
// IF we don't have any server configured, just do local
if c.Config.Kcrypt.Challenger.Server == "" {
additionalHeaders := map[string]string{}
serverURL := c.Config.Kcrypt.Challenger.Server
// If we don't have any server configured, just do local
if serverURL == "" {
return localPass(c.Config)
}
challengeEndpoint := fmt.Sprintf("%s/getPass", c.Config.Kcrypt.Challenger.Server)
postEndpoint := fmt.Sprintf("%s/postPass", c.Config.Kcrypt.Challenger.Server)
if c.Config.Kcrypt.Challenger.MDNS {
serverURL, additionalHeaders, err = queryMDNS(serverURL)
}
getEndpoint := fmt.Sprintf("%s/getPass", serverURL)
postEndpoint := fmt.Sprintf("%s/postPass", serverURL)
for tries := 0; tries < attempts; tries++ {
var generated bool
pass, generated, err = getPass(challengeEndpoint, c.Config.Kcrypt.Challenger.Certificate, p)
pass, generated, err = getPass(getEndpoint, additionalHeaders, c.Config.Kcrypt.Challenger.Certificate, p)
if err == errPartNotFound {
// IF server doesn't have a pass for us, then we generate one and we set it
err = c.generatePass(postEndpoint, p)
err = c.generatePass(postEndpoint, additionalHeaders, p)
if err != nil {
return
}
@@ -118,7 +136,7 @@ func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err er
return
}
fmt.Printf("Failed with error: %s . Will retry.\n", err.Error())
logToFile("Failed with error: %s . Will retry.\n", err.Error())
time.Sleep(1 * time.Second) // network errors? retry
}
@@ -145,3 +163,14 @@ func (c *Client) decryptPassphrase(pass string) (string, error) {
return string(passBytes), err
}
func logToFile(format string, a ...any) {
s := fmt.Sprintf(format, a...)
file, err := os.OpenFile(LOGFILE, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
panic(err)
}
defer file.Close()
file.WriteString(s)
}

View File

@@ -1,8 +1,9 @@
package client
import (
"github.com/kairos-io/kairos/pkg/config"
"github.com/kairos-io/kairos-sdk/collector"
kconfig "github.com/kairos-io/kcrypt/pkg/config"
"gopkg.in/yaml.v3"
)
type Client struct {
@@ -12,11 +13,10 @@ type Client struct {
type Config struct {
Kcrypt struct {
Challenger struct {
Server string `yaml:"challenger_server,omitempty"`
// Non-volatile index memory: where we store the encrypted passphrase (offline mode)
NVIndex string `yaml:"nv_index,omitempty"`
// Certificate index: this is where the rsa pair that decrypts the passphrase lives
CIndex string `yaml:"c_index,omitempty"`
MDNS bool `yaml:"mdns,omitempty"`
Server string `yaml:"challenger_server,omitempty"`
NVIndex string `yaml:"nv_index,omitempty"` // Non-volatile index memory: where we store the encrypted passphrase (offline mode)
CIndex string `yaml:"c_index,omitempty"` // Certificate index: this is where the rsa pair that decrypts the passphrase lives
TPMDevice string `yaml:"tpm_device,omitempty"`
Certificate string `yaml:"certificate,omitempty"`
}
@@ -26,12 +26,21 @@ type Config struct {
func unmarshalConfig() (Config, error) {
var result Config
c, err := config.Scan(config.Directories(kconfig.ConfigScanDirs...), config.NoLogs)
o := &collector.Options{NoLogs: true, MergeBootCMDLine: false}
if err := o.Apply(collector.Directories(append(kconfig.ConfigScanDirs, "/tmp/oem")...)); err != nil {
return result, err
}
c, err := collector.Scan(o, func(d []byte) ([]byte, error) {
return d, nil
})
if err != nil {
return result, err
}
if err = c.Unmarshal(&result); err != nil {
a, _ := c.String()
err = yaml.Unmarshal([]byte(a), &result)
if err != nil {
return result, err
}

View File

@@ -16,13 +16,19 @@ import (
const DefaultNVIndex = "0x1500000"
func getPass(server, certificate string, partition *block.Partition) (string, bool, error) {
msg, err := tpm.Get(server,
func getPass(server string, headers map[string]string, certificate string, partition *block.Partition) (string, bool, error) {
opts := []tpm.Option{
tpm.WithCAs([]byte(certificate)),
tpm.AppendCustomCAToSystemCA,
tpm.WithAdditionalHeader("label", partition.Label),
tpm.WithAdditionalHeader("label", partition.FilesystemLabel),
tpm.WithAdditionalHeader("name", partition.Name),
tpm.WithAdditionalHeader("uuid", partition.UUID))
tpm.WithAdditionalHeader("uuid", partition.UUID),
}
for k, v := range headers {
opts = append(opts, tpm.WithAdditionalHeader(k, v))
}
msg, err := tpm.Get(server, opts...)
if err != nil {
return "", false, err
}

View File

@@ -0,0 +1,85 @@
package client
import (
"fmt"
"net/url"
"strconv"
"strings"
"time"
"github.com/hashicorp/mdns"
)
const (
MDNSServiceType = "_kcrypt._tcp"
MDNSTimeout = 15 * time.Second
)
// queryMDNS will make an mdns query on local network to find a kcrypt challenger server
// instance. If none is found, the original URL is returned and no additional headers.
// If a response is received, the IP address and port from the response will be returned,
// along with an additional "Host" header pointing to the original host.
func queryMDNS(originalURL string) (string, map[string]string, error) {
additionalHeaders := map[string]string{}
var err error
parsedURL, err := url.Parse(originalURL)
if err != nil {
return originalURL, additionalHeaders, fmt.Errorf("parsing the original host: %w", err)
}
host := parsedURL.Host
if !strings.HasSuffix(host, ".local") { // sanity check
return "", additionalHeaders, fmt.Errorf("domain should end in \".local\" when using mdns")
}
mdnsIP, mdnsPort := discoverMDNSServer(host)
if mdnsIP == "" { // no reply
logToFile("no reply from mdns\n")
return originalURL, additionalHeaders, nil
}
additionalHeaders["Host"] = parsedURL.Host
newURL := strings.ReplaceAll(originalURL, host, mdnsIP)
// Remove any port in the original url
if port := parsedURL.Port(); port != "" {
newURL = strings.ReplaceAll(newURL, port, "")
}
// Add any possible port from the mdns response
if mdnsPort != "" {
newURL = strings.ReplaceAll(newURL, mdnsIP, fmt.Sprintf("%s:%s", mdnsIP, mdnsPort))
}
return newURL, additionalHeaders, nil
}
// discoverMDNSServer performs an mDNS query to discover any running kcrypt challenger
// servers on the same network that matches the given hostname.
// If a response is received, the IP address and the Port from the response are returned.
func discoverMDNSServer(hostname string) (string, string) {
// Make a channel for results and start listening
entriesCh := make(chan *mdns.ServiceEntry, 4)
defer close(entriesCh)
logToFile("Will now wait for some mdns server to respond\n")
// Start the lookup. It will block until we read from the chan.
mdns.Lookup(MDNSServiceType, entriesCh)
expectedHost := hostname + "." // FQDN
// Wait until a matching server is found or we reach a timeout
for {
select {
case entry := <-entriesCh:
logToFile("mdns response received\n")
if entry.Host == expectedHost {
logToFile("%s matches %s\n", entry.Host, expectedHost)
return entry.AddrV4.String(), strconv.Itoa(entry.Port) // TODO: v6?
} else {
logToFile("%s didn't match %s\n", entry.Host, expectedHost)
}
case <-time.After(MDNSTimeout):
logToFile("timed out waiting for mdns\n")
return "", ""
}
}
}

View File

@@ -20,14 +20,13 @@ import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -44,10 +43,7 @@ var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
RunSpecs(t, "Control")
}
var _ = BeforeSuite(func() {

View File

@@ -1,3 +1,3 @@
#!/bin/bash
docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.6.21 --allow-privileged $@
docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.7.8 --allow-privileged $@

160
go.mod
View File

@@ -3,142 +3,162 @@ module github.com/kairos-io/kairos-challenger
go 1.20
require (
github.com/go-logr/logr v1.2.4
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/jaypipes/ghw v0.9.0
github.com/kairos-io/kairos v1.24.3-56.0.20230208235509-4d28f3b87f60
github.com/kairos-io/kcrypt v0.5.0
github.com/kairos-io/tpm-helpers v0.0.0-20230119140150-3fa97128ef6b
github.com/hashicorp/mdns v1.0.5
github.com/jaypipes/ghw v0.11.0
github.com/kairos-io/kairos-sdk v0.0.15
github.com/kairos-io/kcrypt v0.7.0
github.com/kairos-io/tpm-helpers v0.0.0-20240123063624-f7a3fcc66199
github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5
github.com/mudler/go-processmanager v0.0.0-20220724164624-c45b5c61312d
github.com/mudler/yip v1.0.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.8.1
github.com/onsi/gomega v1.26.0
github.com/mudler/yip v1.3.0
github.com/onsi/ginkgo/v2 v2.11.0
github.com/onsi/gomega v1.27.8
github.com/pkg/errors v0.9.1
github.com/spectrocloud/peg v0.0.0-20230214140930-4d6672f825b2
github.com/spectrocloud/peg v0.0.0-20230407121159-2e15270c4a46
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.24.2
k8s.io/apimachinery v0.24.2
k8s.io/client-go v0.24.2
sigs.k8s.io/controller-runtime v0.12.2
k8s.io/api v0.27.2
k8s.io/apimachinery v0.27.2
k8s.io/client-go v0.27.2
sigs.k8s.io/controller-runtime v0.15.0
)
require (
atomicgo.dev/cursor v0.1.1 // indirect
atomicgo.dev/cursor v0.1.3 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect
cloud.google.com/go v0.93.3 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
atomicgo.dev/schedule v0.0.2 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.1 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/avast/retry-go v3.0.0+incompatible // indirect
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bramvdbogaerde/go-scp v1.2.1 // indirect
github.com/cavaliergopher/grab/v3 v3.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 // indirect
github.com/codingsince1985/checksum v1.2.6 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.7.7 // indirect
github.com/containerd/continuity v0.4.2 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisbrodbeck/machineid v1.0.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/docker/cli v23.0.5+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v23.0.5+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/folbricht/tpmk v0.1.2-0.20230104073416-f20b20c289d7 // indirect
github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.4 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/certificate-transparency-go v1.1.4 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.15.2 // indirect
github.com/google/go-tpm v0.3.3 // indirect
github.com/google/go-tpm-tools v0.3.10 // indirect
github.com/google/go-tspi v0.3.0 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gookit/color v1.5.2 // indirect
github.com/gookit/color v1.5.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/itchyny/gojq v0.12.11 // indirect
github.com/itchyny/gojq v0.12.13 // indirect
github.com/itchyny/timefmt-go v0.1.5 // indirect
github.com/joho/godotenv v1.5.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/lithammer/fuzzysearch v1.1.5 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/pterm/pterm v0.12.54 // indirect
github.com/prometheus/client_golang v1.15.1 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/pterm/pterm v0.12.63 // indirect
github.com/qeesung/image2ascii v1.0.1 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/twpayne/go-vfs v1.7.2 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/wayneashleyberry/terminal-dimensions v1.1.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/net v0.6.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.13.0 // indirect
golang.org/x/oauth2 v0.7.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.3 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
howett.net/plist v1.0.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/component-base v0.24.2 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
k8s.io/apiextensions-apiserver v0.27.2 // indirect
k8s.io/component-base v0.27.2 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

665
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -23,6 +23,7 @@ import (
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -120,7 +121,9 @@ func main() {
os.Exit(1)
}
go challenger.Start(context.Background(), clientset, reconciler, namespace, challengerAddr)
serverLog := ctrl.Log.WithName("server")
go challenger.Start(context.Background(), serverLog, clientset, reconciler, namespace, challengerAddr)
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {

103
mdns-notes.md Normal file
View File

@@ -0,0 +1,103 @@
# Prerequisites
Nodes and KMS should be on the same local network (mdns requirement)
# Steps
- Create a cluster with a port bound to the host:
```
k3d cluster create kcrypt -p '30000:30000@server:0'
```
(we are going to assign this port to the kcrypt challenger server and advertise it over mdns)
- Follow [the instructions to setup the kcrypt challenger server](https://github.com/kairos-io/kcrypt-challenger#installation):
```
helm repo add kairos https://kairos-io.github.io/helm-charts
helm install kairos-crd kairos/kairos-crds
```
Create the following `kcrypt-challenger-values.yaml` file:
```yaml
service:
challenger:
type: "NodePort"
port: 8082
nodePort: 30000
```
and deploy the challenger server with it:
```bash
helm install -f kcrypt-challenger-values.yaml kairos-challenger kairos/kairos-challenger
```
- Add the sealedvolume and secret for the tpm chip:
```
apiVersion: v1
kind: Secret
metadata:
name: example-host-tpm-secret
namespace: default
type: Opaque
stringData:
pass: "awesome-passphrase"
---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: example-host
namespace: default
spec:
TPMHash: "5640e37f4016da16b841a93880dcc44886904392fa3c86681087b77db5afedbe"
partitions:
- label: COS_PERSISTENT
secret:
name: example-host-tpm-secret
path: pass
quarantined: false
```
- Start the [simple-mdns-server](https://github.com/kairos-io/simple-mdns-server)
```
go run . --port 30000 --interfaceName enp121s0 --serviceType _kcrypt._tcp --hostName mychallenger.local
```
- Start a node in manual install mode
- Replace `/system/discovery/kcrypt-discovery-challenger` with a custom build (until we merge)
- Create the following config:
```
#cloud-config
users:
- name: kairos
passwd: kairos
install:
grub_options:
extra_cmdline: "rd.neednet=1"
encrypted_partitions:
- COS_PERSISTENT
# Kcrypt configuration block
kcrypt:
challenger:
mdns: true
challenger_server: "http://mychallenger.local"
```
- Install:
```
kairos-agent manual-install --device auto config.yaml
```

View File

@@ -4,12 +4,13 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/go-logr/logr"
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
"github.com/kairos-io/kairos-challenger/pkg/constants"
"github.com/kairos-io/kairos-challenger/pkg/payload"
@@ -97,8 +98,8 @@ func getPubHash(token string) (string, error) {
return tpm.DecodePubHash(ek)
}
func Start(ctx context.Context, kclient *kubernetes.Clientset, reconciler *controllers.SealedVolumeReconciler, namespace, address string) {
fmt.Println("Challenger started at", address)
func Start(ctx context.Context, logger logr.Logger, kclient *kubernetes.Clientset, reconciler *controllers.SealedVolumeReconciler, namespace, address string) {
logger.Info("Challenger started", "address", address)
s := http.Server{
Addr: address,
ReadTimeout: 10 * time.Second,
@@ -107,189 +108,214 @@ func Start(ctx context.Context, kclient *kubernetes.Clientset, reconciler *contr
m := http.NewServeMux()
errorMessage := func(writer io.WriteCloser, errMsg string) {
err := json.NewEncoder(writer).Encode(payload.Data{Error: errMsg})
if err != nil {
fmt.Println("error encoding the response to json", err.Error())
}
fmt.Println(errMsg)
}
m.HandleFunc("/postPass", func(w http.ResponseWriter, r *http.Request) {
conn, _ := upgrader.Upgrade(w, r, nil) // error ignored for sake of simplicity
for {
fmt.Println("Receiving passphrase")
if err := tpm.AuthRequest(r, conn); err != nil {
fmt.Println("error", err.Error())
return
}
defer conn.Close()
fmt.Println("[Receiving passphrase] auth succeeded")
token := r.Header.Get("Authorization")
hashEncoded, err := getPubHash(token)
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
logger.Error(err, "upgrading connection")
return
}
defer func() {
err := conn.Close()
if err != nil {
fmt.Println("error decoding pubhash", err.Error())
return
logger.Error(err, "closing the connection")
}
fmt.Println("[Receiving passphrase] pubhash", hashEncoded)
}()
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
v := &payload.Data{}
logger.Info("Receiving passphrase")
if err := tpm.AuthRequest(r, conn); err != nil {
errorMessage(conn, logger, err, "auth request")
return
}
logger.Info("[Receiving passphrase] auth succeeded")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
token := r.Header.Get("Authorization")
hashEncoded, err := getPubHash(token)
if err != nil {
errorMessage(conn, logger, err, "decoding pubhash")
return
}
logger.Info("[Receiving passphrase] pubhash", "encodedhash", hashEncoded)
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
v := &payload.Data{}
logger.Info("Reading request data", "label", label, "name", name, "uuid", uuid)
volumeList := &keyserverv1alpha1.SealedVolumeList{}
for {
if err := reconciler.List(ctx, volumeList, &client.ListOptions{Namespace: namespace}); err != nil {
fmt.Println("Failed listing volumes")
fmt.Println(err)
logger.Error(err, "listing volumes")
continue
}
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)
if sealedVolumeData == nil {
fmt.Println("No TPM Hash found for", hashEncoded)
conn.Close()
return
}
if err := conn.ReadJSON(v); err != nil {
fmt.Println("error", err.Error())
return
}
if v.HasPassphrase() && !v.HasError() {
secretName, secretPath := sealedVolumeData.DefaultSecret()
_, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
fmt.Printf("Failed getting secret: %s\n", err.Error())
continue
}
secret := corev1.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "apps/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
StringData: map[string]string{
secretPath: v.Passphrase,
constants.GeneratedByKey: v.GeneratedBy,
},
Type: "Opaque",
}
_, err := kclient.CoreV1().Secrets(namespace).Create(ctx, &secret, v1.CreateOptions{})
if err != nil {
fmt.Println("failed during secret creation:", err.Error())
}
} else {
fmt.Println("Posted for already existing secret - ignoring")
}
} else {
fmt.Println("Invalid answer from client: doesn't contain any passphrase")
}
break
}
logger.Info("Looking up volume with request data")
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)
if sealedVolumeData == nil {
errorMessage(conn, logger, fmt.Errorf("no TPM Hash found for %s", hashEncoded), "")
return
}
logger.Info("[Looking up volume with request data] succeeded")
if err := conn.ReadJSON(v); err != nil {
logger.Error(err, "reading json from connection")
return
}
if !v.HasPassphrase() {
errorMessage(conn, logger, fmt.Errorf("invalid answer from client: doesn't contain any passphrase"), "")
}
if v.HasError() {
errorMessage(conn, logger, fmt.Errorf("error: %s", v.Error), v.Error)
}
secretName, secretPath := sealedVolumeData.DefaultSecret()
logger.Info("Looking up secret in with name", "name", secretName, "namespace", namespace)
_, err = kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err == nil {
logger.Info("Posted for already existing secret - ignoring")
return
}
if !apierrors.IsNotFound(err) {
errorMessage(conn, logger, err, "failed getting secret")
return
}
logger.Info("secret not found, creating one")
secret := corev1.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "apps/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
StringData: map[string]string{
secretPath: v.Passphrase,
constants.GeneratedByKey: v.GeneratedBy,
},
Type: "Opaque",
}
_, err = kclient.CoreV1().Secrets(namespace).Create(ctx, &secret, v1.CreateOptions{})
if err != nil {
errorMessage(conn, logger, err, "failed during secret creation")
}
logger.Info("created new secret")
})
m.HandleFunc("/getPass", func(w http.ResponseWriter, r *http.Request) {
conn, _ := upgrader.Upgrade(w, r, nil) // error ignored for sake of simplicity
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
logger.Error(err, "upgrading connection")
return
}
defer func() {
err := conn.Close()
if err != nil {
logger.Error(err, "closing the connection")
}
}()
logger.Info("Received connection")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
for {
fmt.Println("Received connection")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
if err := reconciler.List(ctx, volumeList, &client.ListOptions{Namespace: namespace}); err != nil {
fmt.Println("Failed listing volumes")
fmt.Println(err)
logger.Error(err, "listing volumes")
continue
}
token := r.Header.Get("Authorization")
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
if err := tpm.AuthRequest(r, conn); err != nil {
fmt.Println("error validating challenge", err.Error())
return
}
hashEncoded, err := getPubHash(token)
if err != nil {
fmt.Println("error decoding pubhash", err.Error())
return
}
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)
if sealedVolumeData == nil {
writer, _ := conn.NextWriter(websocket.BinaryMessage)
errorMessage(writer, fmt.Sprintf("Invalid hash: %s", hashEncoded))
conn.Close()
return
}
writer, _ := conn.NextWriter(websocket.BinaryMessage)
if !sealedVolumeData.Quarantined {
secretName, secretPath := sealedVolumeData.DefaultSecret()
// 1. The admin sets a specific cleartext password from Kube manager
// SealedVolume -> with a secret .
// 2. The admin just adds a SealedVolume associated with a TPM Hash ( you don't provide any passphrase )
// 3. There is no challenger server at all (offline mode)
//
secret, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err == nil {
passphrase := secret.Data[secretPath]
generatedBy := secret.Data[constants.GeneratedByKey]
p := payload.Data{Passphrase: string(passphrase), GeneratedBy: string(generatedBy)}
err = json.NewEncoder(writer).Encode(p)
if err != nil {
fmt.Println("error encoding the passphrase to json", err.Error(), string(passphrase))
}
if err = writer.Close(); err != nil {
fmt.Println("error closing the writer", err.Error())
return
}
if err = conn.Close(); err != nil {
fmt.Println("error closing the connection", err.Error())
return
}
return
} else {
errorMessage(writer, fmt.Sprintf("No secret found for %s and %s", hashEncoded, sealedVolumeData.PartitionLabel))
}
} else {
errorMessage(writer, fmt.Sprintf("quarantined: %s", sealedVolumeData.PartitionLabel))
if err = conn.Close(); err != nil {
fmt.Println("error closing the connection", err.Error())
return
}
return
}
break
}
},
)
s.Handler = m
logger.Info("reading data from request")
token := r.Header.Get("Authorization")
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
tokenStr := "empty"
if token != "" {
tokenStr = "not empty"
}
logger.Info("request data", "token", tokenStr, "label", label, "name", name, "uuid", uuid)
if err := tpm.AuthRequest(r, conn); err != nil {
logger.Error(err, "error validating challenge")
return
}
hashEncoded, err := getPubHash(token)
if err != nil {
logger.Error(err, "error decoding pubhash")
return
}
logger.Info("Looking up volume with request data")
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)
if sealedVolumeData == nil {
errorMessage(conn, logger, fmt.Errorf("no volume found with data from request and hash: %s", hashEncoded), "")
return
}
logger.Info("[Looking up volume with request data] succeeded")
if sealedVolumeData.Quarantined {
errorMessage(conn, logger, fmt.Errorf("quarantined: %s", sealedVolumeData.PartitionLabel), "")
return
}
secretName, secretPath := sealedVolumeData.DefaultSecret()
// 1. The admin sets a specific cleartext password from Kube manager
// SealedVolume -> with a secret .
// 2. The admin just adds a SealedVolume associated with a TPM Hash ( you don't provide any passphrase )
// 3. There is no challenger server at all (offline mode)
//
logger.Info(fmt.Sprintf("looking up secret %s in namespace %s", secretName, namespace))
secret, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
errorMessage(conn, logger, fmt.Errorf("No secret found for %s and %s", hashEncoded, sealedVolumeData.PartitionLabel), "")
} else {
errorMessage(conn, logger, err, "getting the secret from Kubernetes")
}
return
}
logger.Info(fmt.Sprintf("secret %s found in namespace %s", secretName, namespace))
passphrase := secret.Data[secretPath]
generatedBy := secret.Data[constants.GeneratedByKey]
writer, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
logger.Error(err, "getting a writer from the connection")
}
p := payload.Data{Passphrase: string(passphrase), GeneratedBy: string(generatedBy)}
err = json.NewEncoder(writer).Encode(p)
if err != nil {
logger.Error(err, "writing passphrase to the websocket channel")
}
if err = writer.Close(); err != nil {
logger.Error(err, "closing the writer")
return
}
})
s.Handler = logRequestHandler(logger, m)
go func() {
err := s.ListenAndServe()
@@ -334,3 +360,36 @@ func findVolumeFor(requestData PassphraseRequestData, volumeList *keyserverv1alp
return nil
}
// errorMessage should be used when an error should be both, printed to the stdout
// and sent over the wire to the websocket client.
//
// It is a no-op when theErr is nil. The description gives context about what
// the server was doing when the error occurred (may be empty).
func errorMessage(conn *websocket.Conn, logger logr.Logger, theErr error, description string) {
	if theErr == nil {
		return
	}
	logger.Error(theErr, description)

	writer, err := conn.NextWriter(websocket.BinaryMessage)
	if err != nil {
		logger.Error(err, "getting a writer from the connection")
		// writer is nil here; continuing would panic in Encode below.
		return
	}

	errMsg := theErr.Error()
	if err := json.NewEncoder(writer).Encode(payload.Data{Error: errMsg}); err != nil {
		logger.Error(err, "error encoding the response to json")
	}

	if err := writer.Close(); err != nil {
		logger.Error(err, "closing the writer")
	}
}
// logRequestHandler wraps h so that every incoming request is logged
// (method, URI, referer and user agent) before being delegated to h.
func logRequestHandler(logger logr.Logger, h http.Handler) http.Handler {
	logAndServe := func(w http.ResponseWriter, r *http.Request) {
		logger.Info("Incoming request",
			"method", r.Method,
			"uri", r.URL.String(),
			"referer", r.Header.Get("Referer"),
			"userAgent", r.Header.Get("User-Agent"))

		h.ServeHTTP(w, r)
	}

	return http.HandlerFunc(logAndServe)
}

42
renovate.json Normal file
View File

@@ -0,0 +1,42 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
],
"schedule": [
"after 11pm every weekday",
"before 7am every weekday",
"every weekend"
],
"timezone": "Europe/Brussels",
"packageRules": [
{
"matchUpdateTypes": [
"patch"
],
"automerge": true
}
],
"regexManagers": [
{
"fileMatch": [
"^Earthfile$"
],
"matchStrings": [
"#\\s*renovate:\\s*datasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\sARG\\s+.+_VERSION=(?<currentValue>.*?)\\s"
],
"versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
},
{
"fileMatch": [
"^earthly\\.(sh|ps1)$"
],
"datasourceTemplate": "docker",
"depNameTemplate": "earthly/earthly",
"matchStrings": [
"earthly\\/earthly:(?<currentValue>.*?)\\s"
],
"versioningTemplate": "semver-coerced"
}
]
}

View File

@@ -59,4 +59,4 @@ kubectl apply -k "$SCRIPT_DIR/../tests/assets/"
# https://stackoverflow.com/a/6752280
export KMS_ADDRESS="10.0.2.2.challenger.sslip.io"
PATH=$PATH:$GOPATH/bin ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/
go run github.com/onsi/ginkgo/v2/ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/

View File

@@ -11,6 +11,7 @@ spec:
- hosts:
- 10.0.2.2.challenger.sslip.io
- ${CLUSTER_IP}.challenger.sslip.io
- discoverable-kms.local
secretName: kms-tls
rules:
- host: 10.0.2.2.challenger.sslip.io
@@ -33,3 +34,13 @@ spec:
name: kcrypt-controller-kcrypt-escrow-server
port:
number: 8082
- host: discoverable-kms.local
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: kcrypt-controller-kcrypt-escrow-server
port:
number: 8082

View File

@@ -19,13 +19,19 @@ import (
var installationOutput string
var vm VM
var mdnsVM VM
var _ = Describe("kcrypt encryption", func() {
var config string
var vmOpts VMOptions
var expectedInstallationSuccess bool
BeforeEach(func() {
expectedInstallationSuccess = true
vmOpts = DefaultVMOptions()
RegisterFailHandler(printInstallationOutput)
_, vm = startVM()
_, vm = startVM(vmOpts)
fmt.Printf("\nvm.StateDir = %+v\n", vm.StateDir)
vm.EventuallyConnects(1200)
@@ -43,10 +49,13 @@ var _ = Describe("kcrypt encryption", func() {
Expect(err).ToNot(HaveOccurred())
installationOutput, err = vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
Expect(err).ToNot(HaveOccurred(), installationOutput)
if expectedInstallationSuccess {
Expect(err).ToNot(HaveOccurred(), installationOutput)
}
})
AfterEach(func() {
vm.GatherLog("/run/immucore/immucore.log")
err := vm.Destroy(func(vm VM) {
// Stop TPM emulator
tpmPID, err := os.ReadFile(path.Join(vm.StateDir, "tpm", "pid"))
@@ -62,6 +71,63 @@ var _ = Describe("kcrypt encryption", func() {
Expect(err).ToNot(HaveOccurred())
})
When("discovering KMS with mdns", Label("discoverable-kms"), func() {
var tpmHash string
var mdnsHostname string
BeforeEach(func() {
By("creating the secret in kubernetes")
tpmHash = createTPMPassphraseSecret(vm)
mdnsHostname = "discoverable-kms.local"
By("deploying simple-mdns-server vm")
mdnsVM = deploySimpleMDNSServer(mdnsHostname)
config = fmt.Sprintf(`#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos
install:
encrypted_partitions:
- COS_PERSISTENT
grub_options:
extra_cmdline: "rd.neednet=1"
reboot: false # we will reboot manually
kcrypt:
challenger:
mdns: true
challenger_server: "http://%[1]s"
`, mdnsHostname)
})
AfterEach(func() {
cmd := exec.Command("kubectl", "delete", "sealedvolume", tpmHash)
out, err := cmd.CombinedOutput()
Expect(err).ToNot(HaveOccurred(), out)
err = mdnsVM.Destroy(func(vm VM) {})
Expect(err).ToNot(HaveOccurred())
})
It("discovers the KMS using mdns", func() {
Skip("TODO: make this test work")
By("rebooting")
vm.Reboot()
By("checking that we can connect after installation")
vm.EventuallyConnects(1200)
By("checking if we got an encrypted partition")
out, err := vm.Sudo("blkid")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
})
})
// https://kairos.io/docs/advanced/partition_encryption/#offline-mode
When("doing local encryption", Label("local-encryption"), func() {
BeforeEach(func() {
@@ -91,25 +157,9 @@ users:
//https://kairos.io/docs/advanced/partition_encryption/#online-mode
When("using a remote key management server (automated passphrase generation)", Label("remote-auto"), func() {
var tpmHash string
var err error
BeforeEach(func() {
tpmHash, err = vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
Expect(err).ToNot(HaveOccurred(), tpmHash)
kubectlApplyYaml(fmt.Sprintf(`---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: "%[1]s"
namespace: default
spec:
TPMHash: "%[1]s"
partitions:
- label: COS_PERSISTENT
quarantined: false
`, strings.TrimSpace(tpmHash)))
tpmHash = createTPMPassphraseSecret(vm)
config = fmt.Sprintf(`#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
@@ -212,10 +262,6 @@ install:
kcrypt:
challenger:
challenger_server: "http://%s"
nv_index: ""
c_index: ""
tpm_device: ""
`, os.Getenv("KMS_ADDRESS"))
})
@@ -242,24 +288,15 @@ kcrypt:
When("the key management server is listening on https", func() {
var tpmHash string
var err error
BeforeEach(func() {
tpmHash, err = vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
Expect(err).ToNot(HaveOccurred(), tpmHash)
tpmHash = createTPMPassphraseSecret(vm)
})
kubectlApplyYaml(fmt.Sprintf(`---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: "%[1]s"
namespace: default
spec:
TPMHash: "%[1]s"
partitions:
- label: COS_PERSISTENT
quarantined: false
`, strings.TrimSpace(tpmHash)))
AfterEach(func() {
cmd := exec.Command("kubectl", "delete", "sealedvolume", tpmHash)
out, err := cmd.CombinedOutput()
Expect(err).ToNot(HaveOccurred(), out)
})
When("the certificate is pinned on the configuration", Label("remote-https-pinned"), func() {
@@ -299,6 +336,8 @@ install:
When("the no certificate is set in the configuration", Label("remote-https-bad-cert"), func() {
BeforeEach(func() {
expectedInstallationSuccess = false
config = fmt.Sprintf(`#cloud-config
hostname: metal-{{ trunc 4 .MachineID }}
@@ -316,16 +355,13 @@ install:
kcrypt:
challenger:
challenger_server: "https://%s"
nv_index: ""
c_index: ""
tpm_device: ""
`, os.Getenv("KMS_ADDRESS"))
})
It("fails to talk to the server", func() {
out, err := vm.Sudo("cat manual-install.txt")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).To(MatchRegexp("could not encrypt partition.*x509: certificate signed by unknown authority"))
Expect(out).To(MatchRegexp("failed to verify certificate: x509: certificate signed by unknown authority"))
})
})
})
@@ -362,29 +398,57 @@ func getChallengerServerCert() string {
}
func createConfigWithCert(server, cert string) client.Config {
return client.Config{
Kcrypt: struct {
Challenger struct {
Server string "yaml:\"challenger_server,omitempty\""
NVIndex string "yaml:\"nv_index,omitempty\""
CIndex string "yaml:\"c_index,omitempty\""
TPMDevice string "yaml:\"tpm_device,omitempty\""
Certificate string "yaml:\"certificate,omitempty\""
}
}{
Challenger: struct {
Server string "yaml:\"challenger_server,omitempty\""
NVIndex string "yaml:\"nv_index,omitempty\""
CIndex string "yaml:\"c_index,omitempty\""
TPMDevice string "yaml:\"tpm_device,omitempty\""
Certificate string "yaml:\"certificate,omitempty\""
}{
Server: server,
NVIndex: "",
CIndex: "",
TPMDevice: "",
Certificate: cert,
},
},
}
c := client.Config{}
c.Kcrypt.Challenger.Server = server
c.Kcrypt.Challenger.Certificate = cert
return c
}
// createTPMPassphraseSecret reads the TPM hash from the given VM (by running
// the kcrypt-discovery-challenger binary on it) and applies a SealedVolume
// resource named after that hash, covering the COS_PERSISTENT partition.
// It returns the TPM hash as reported by the VM (untrimmed; callers that
// need the resource name should TrimSpace it, as the YAML below does).
func createTPMPassphraseSecret(vm VM) string {
	tpmHash, err := vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
	Expect(err).ToNot(HaveOccurred(), tpmHash)

	// The SealedVolume name and TPMHash are both the (trimmed) hash, so the
	// AfterEach blocks can delete the resource by hash.
	kubectlApplyYaml(fmt.Sprintf(`---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
  name: "%[1]s"
  namespace: default
spec:
  TPMHash: "%[1]s"
  partitions:
    - label: COS_PERSISTENT
      quarantined: false
`, strings.TrimSpace(tpmHash)))

	return tpmHash
}
// We run the simple-mdns-server (https://github.com/kairos-io/simple-mdns-server/)
// inside a VM next to the one we test. The server advertises the KMS as running on 10.0.2.2
// (the host machine). This is a "hack" and is needed because of how the default
// networking in qemu works. We need to be within the same network and that
// network is only available within another VM.
// https://wiki.qemu.org/Documentation/Networking
//
// deploySimpleMDNSServer boots a small helper VM (no TPM), downloads the
// latest simple-mdns-server release for the VM's architecture and starts it
// in the background, advertising the given mdns hostname. The caller is
// responsible for destroying the returned VM.
func deploySimpleMDNSServer(hostname string) VM {
	// A minimal VM is enough for the mdns server: one CPU, no TPM emulation.
	opts := DefaultVMOptions()
	opts.Memory = "2000"
	opts.CPUS = "1"
	opts.EmulateTPM = false

	_, vm := startVM(opts)
	vm.EventuallyConnects(1200)

	// Fetch the release asset matching this architecture from GitHub.
	out, err := vm.Sudo(`curl -s https://api.github.com/repos/kairos-io/simple-mdns-server/releases/latest | jq -r .assets[].browser_download_url | grep $(uname -m) | xargs curl -L -o sms.tar.gz`)
	Expect(err).ToNot(HaveOccurred(), string(out))

	out, err = vm.Sudo("tar xvf sms.tar.gz")
	Expect(err).ToNot(HaveOccurred(), string(out))

	// Start the simple-mdns-server in the background
	// (port 80 / address 10.0.2.2 point clients at the host machine).
	out, err = vm.Sudo(fmt.Sprintf(
		"/bin/bash -c './simple-mdns-server --port 80 --address 10.0.2.2 --serviceType _kcrypt._tcp --hostName %s &'", hostname))
	Expect(err).ToNot(HaveOccurred(), string(out))

	return vm
}

View File

@@ -25,6 +25,53 @@ func TestE2e(t *testing.T) {
RunSpecs(t, "kcrypt-challenger e2e test Suite")
}
// VMOptions holds the knobs used to start a test VM (see startVM).
type VMOptions struct {
	ISO        string // path to the ISO image to boot
	User       string // SSH user for connecting to the VM
	Password   string // SSH password for connecting to the VM
	Memory     string // VM memory in MB (string, passed through to qemu options)
	CPUS       string // number of virtual CPUs (string, passed through to qemu options)
	RunSpicy   bool   // whether to launch the "spicy" SPICE client for debugging
	UseKVM     bool   // whether to enable KVM acceleration
	EmulateTPM bool   // whether to attach a software TPM (swtpm) to the VM
}
// DefaultVMOptions builds a VMOptions from the environment, applying defaults
// where a variable is unset.
//
// Recognized variables: ISO, MEMORY (default "2096"), CPUS (default "2"),
// MACHINE_SPICY and KVM (parsed with strconv.ParseBool, default false), plus
// whatever user() and pass() read for the SSH credentials. TPM emulation is
// enabled by default.
func DefaultVMOptions() VMOptions {
	memory := os.Getenv("MEMORY")
	if memory == "" {
		memory = "2096"
	}

	cpus := os.Getenv("CPUS")
	if cpus == "" {
		cpus = "2"
	}

	runSpicy := false
	if s := os.Getenv("MACHINE_SPICY"); s != "" {
		// Use the captured value instead of re-reading the environment.
		parsed, err := strconv.ParseBool(s)
		Expect(err).ToNot(HaveOccurred())
		runSpicy = parsed
	}

	useKVM := false
	if s := os.Getenv("KVM"); s != "" {
		parsed, err := strconv.ParseBool(s)
		Expect(err).ToNot(HaveOccurred())
		useKVM = parsed
	}

	return VMOptions{
		ISO:        os.Getenv("ISO"),
		User:       user(),
		Password:   pass(),
		Memory:     memory,
		CPUS:       cpus,
		RunSpicy:   runSpicy,
		UseKVM:     useKVM,
		EmulateTPM: true,
	}
}
func user() string {
user := os.Getenv("SSH_USER")
if user == "" {
@@ -42,8 +89,8 @@ func pass() string {
return pass
}
func startVM() (context.Context, VM) {
if os.Getenv("ISO") == "" {
func startVM(vmOpts VMOptions) (context.Context, VM) {
if vmOpts.ISO == "" {
fmt.Println("ISO missing")
os.Exit(1)
}
@@ -53,29 +100,22 @@ func startVM() (context.Context, VM) {
stateDir, err := os.MkdirTemp("", "")
Expect(err).ToNot(HaveOccurred())
emulateTPM(stateDir)
if vmOpts.EmulateTPM {
emulateTPM(stateDir)
}
sshPort, err := getFreePort()
Expect(err).ToNot(HaveOccurred())
memory := os.Getenv("MEMORY")
if memory == "" {
memory = "2096"
}
cpus := os.Getenv("CPUS")
if cpus == "" {
cpus = "2"
}
opts := []types.MachineOption{
types.QEMUEngine,
types.WithISO(os.Getenv("ISO")),
types.WithMemory(memory),
types.WithCPU(cpus),
types.WithISO(vmOpts.ISO),
types.WithMemory(vmOpts.Memory),
types.WithCPU(vmOpts.CPUS),
types.WithSSHPort(strconv.Itoa(sshPort)),
types.WithID(vmName),
types.WithSSHUser(user()),
types.WithSSHPass(pass()),
types.WithSSHUser(vmOpts.User),
types.WithSSHPass(vmOpts.Password),
types.OnFailure(func(p *process.Process) {
defer GinkgoRecover()
@@ -109,9 +149,12 @@ func startVM() (context.Context, VM) {
types.WithStateDir(stateDir),
// Serial output to file: https://superuser.com/a/1412150
func(m *types.MachineConfig) error {
if vmOpts.EmulateTPM {
m.Args = append(m.Args,
"-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s/swtpm-sock", path.Join(stateDir, "tpm")),
"-tpmdev", "emulator,id=tpm0,chardev=chrtpm", "-device", "tpm-tis,tpmdev=tpm0")
}
m.Args = append(m.Args,
"-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s/swtpm-sock", path.Join(stateDir, "tpm")),
"-tpmdev", "emulator,id=tpm0,chardev=chrtpm", "-device", "tpm-tis,tpmdev=tpm0",
"-chardev", fmt.Sprintf("stdio,mux=on,id=char0,logfile=%s,signal=off", path.Join(stateDir, "serial.log")),
"-serial", "chardev:char0",
"-mon", "chardev=char0",
@@ -123,14 +166,14 @@ func startVM() (context.Context, VM) {
// Set this to true to debug.
// You can connect to it with "spicy" or other tool.
var spicePort int
if os.Getenv("MACHINE_SPICY") != "" {
if vmOpts.RunSpicy {
spicePort, err = getFreePort()
Expect(err).ToNot(HaveOccurred())
fmt.Printf("Spice port = %d\n", spicePort)
opts = append(opts, types.WithDisplay(fmt.Sprintf("-spice port=%d,addr=127.0.0.1,disable-ticketing", spicePort)))
}
if os.Getenv("KVM") != "" {
if vmOpts.UseKVM {
opts = append(opts, func(m *types.MachineConfig) error {
m.Args = append(m.Args,
"-enable-kvm",
@@ -147,7 +190,7 @@ func startVM() (context.Context, VM) {
ctx, err := vm.Start(context.Background())
Expect(err).ToNot(HaveOccurred())
if os.Getenv("MACHINE_SPICY") != "" {
if vmOpts.RunSpicy {
cmd := exec.Command("spicy",
"-h", "127.0.0.1",
"-p", strconv.Itoa(spicePort))