Mirror of https://github.com/kairos-io/kcrypt-challenger.git (synced 2025-09-03 07:44:15 +00:00)

Compare commits: test_itxak...v0.11.2 (118 commits)
Commits in this range (SHA1):

2aba011ada c1a92786b2 a8e35a94f5 5089f4cc71 a925d877bc a21fb32bc0 f8ef34077d 4c2891e33b
3bf6a1e612 3e519be076 f8045707ff 8194344115 c92402b6c9 8f9d463bc0 33ba761d42 65108068e8
8314c64169 52dfdf3420 09a6ec31ec a33d7872c5 2f0d6d778a 2d15026331 0fa24f7679 865c2fc795
42fca7593a 4e87807d1f e984eed1c1 4e33127982 6a180b7cde 6e2211e4d6 97dcf030cb 93596bd189
012329e54b 57e911e62a 401e3f9735 91edb4eb57 9bdc42fbba d6b79752a3 63795470b1 09e155828c
f3ade81dd3 07ce451b60 978d0aa3be 615d2013b7 6b8245dc61 df29a61b8b 23e4a1dd55 42709484ac
97f92cc809 09a93ff001 02b5389fc6 f970ef1899 429b86ea09 5bfbac6892 d9e658b202 204ce64465
17d1414b14 b6c5d331fb a2b28af7b2 dd187adf3a 196bcf8500 50441f8e4c a5e73df6e6 904ce9a1b8
2039b57421 c4dcabcabb f757f852dd ef14cef5c4 7205723259 202668005f b572776381 f189719055
5bbc4fd0fb 2f2f577db7 d64cab6a7d 050d1832dd 06552b7777 b8ff5f31dc 2f582b3a83 521363de93
7a805f374b 06d3d6b1c1 6a337e5812 a4c5c84719 a410398adb a59b3019ed e0138fe609 fe5d338ed5
d708fcfa26 2e63d50125 d4e8b2adc2 10dcecdc85 3c4663afa5 95a352f4b4 fbfd7c9f07 7d84c01663
311b8adda0 bf59ecd475 71e90b94aa 3d2d2de9dc c42e66a9de da93e626c5 ecbbe1499e 09981d750e
b5b4d0d042 8420155746 a9359bf713 b31467e925 c5dc8db56b a80703a556 0b6f771d32 72dd7d3e50
0619047a20 715664969a bcda5b5b38 b2a0330dd8 0b68d90081 40267d4c24
.earthlyignore (new file, 1 line)
@@ -0,0 +1 @@
bin/
.github/ISSUE_TEMPLATE/file-issues-on-main-kairos-repo.md (new file, 12 lines)
@@ -0,0 +1,12 @@
---
name: File issues on main Kairos repo
about: Tell users to file their issues on the main Kairos repo
title: ''
labels: ''
assignees: ''

---

:warning: All Kairos issues are tracked in our main repo, please file your issue there, thanks! :warning:

https://github.com/kairos-io/kairos/issues
.github/workflows/dependabot_auto.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
name: Dependabot auto-merge
on:
- pull_request_target

permissions:
  contents: write
  pull-requests: write
  packages: read

jobs:
  dependabot:
    runs-on: ubuntu-latest
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - name: Dependabot metadata
        id: metadata
        uses: dependabot/fetch-metadata@v2.3.0
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          skip-commit-verification: true

      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Approve a PR if not already approved
        run: |
          gh pr checkout "$PR_URL"
          if [ "$(gh pr status --json reviewDecision -q .currentBranch.reviewDecision)" != "APPROVED" ];
          then
            gh pr review --approve "$PR_URL"
          else
            echo "PR already approved.";
          fi
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

      - name: Enable auto-merge for Dependabot PRs
        run: gh pr merge --auto --squash "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
.github/workflows/e2e-tests.yml (116 lines changed)
@@ -9,54 +9,29 @@ on:
|
||||
paths-ignore:
|
||||
- 'README.md'
|
||||
|
||||
concurrency:
|
||||
group: ci-e2e-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
e2e-tests:
|
||||
runs-on: self-hosted
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- label: "local-encryption"
|
||||
- label: "remote-auto"
|
||||
- label: "remote-static"
|
||||
- label: "remote-https-pinned"
|
||||
- label: "remote-https-bad-cert"
|
||||
build-iso:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
uses: actions/setup-go@v5
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
go-version: ^1.20
|
||||
- name: Run tests
|
||||
env:
|
||||
LABEL: ${{ matrix.label }}
|
||||
KVM: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: build iso
|
||||
run: |
|
||||
sudo apt update && \
|
||||
sudo apt install -y git qemu-system-x86 qemu-utils swtpm jq make glibc-tools \
|
||||
openssl curl gettext ca-certificates curl gnupg lsb-release
|
||||
|
||||
curl -L https://github.com/mudler/luet/releases/download/0.33.0/luet-0.33.0-linux-amd64 -o luet
|
||||
chmod +x luet
|
||||
sudo mv luet /usr/bin/luet
|
||||
sudo mkdir -p /etc/luet/repos.conf.d/
|
||||
sudo luet repo add -y kairos --url quay.io/kairos/packages --type docker
|
||||
LUET_NOLOCK=true sudo -E luet install -y container/kubectl utils/k3d utils/earthly
|
||||
|
||||
earthly -P +iso
|
||||
export ISO=$PWD/build/challenger.iso
|
||||
|
||||
go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
|
||||
go get github.com/onsi/gomega/...
|
||||
go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.7.1
|
||||
go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.7.1
|
||||
go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.7.1
|
||||
|
||||
# Configure earthly to use the docker mirror in CI
|
||||
# https://docs.earthly.dev/ci-integration/pull-through-cache#configuring-earthly-to-use-the-cache
|
||||
mkdir -p ~/.earthly/
|
||||
cat << EOF > ~/.earthly/config.yml
|
||||
global:
|
||||
buildkit_additional_config: |
|
||||
@@ -66,5 +41,70 @@ jobs:
|
||||
insecure = true
|
||||
EOF
|
||||
|
||||
earthly -P +iso
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: challenger.iso.zip
|
||||
path: |
|
||||
build/*.iso
|
||||
e2e-tests:
|
||||
needs:
|
||||
- build-iso
|
||||
runs-on: kvm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- label: "local-encryption"
|
||||
- label: "remote-auto"
|
||||
- label: "remote-static"
|
||||
- label: "remote-https-pinned"
|
||||
- label: "remote-https-bad-cert"
|
||||
- label: "discoverable-kms"
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install deps
|
||||
run: |
|
||||
curl -L https://github.com/mudler/luet/releases/download/0.33.0/luet-0.33.0-linux-amd64 -o luet
|
||||
chmod +x luet
|
||||
sudo mv luet /usr/bin/luet
|
||||
sudo mkdir -p /etc/luet/repos.conf.d/
|
||||
sudo luet repo add -y kairos --url quay.io/kairos/packages --type docker
|
||||
LUET_NOLOCK=true sudo -E luet install -y container/kubectl utils/k3d
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: challenger.iso.zip
|
||||
- name: Run tests
|
||||
env:
|
||||
LABEL: ${{ matrix.label }}
|
||||
KVM: true
|
||||
run: |
|
||||
sudo apt update && \
|
||||
sudo apt install -y git qemu-system-x86 qemu-utils swtpm jq make glibc-tools \
|
||||
openssl curl gettext ca-certificates curl gnupg lsb-release
|
||||
|
||||
export ISO=$PWD/$(ls *.iso)
|
||||
# update controllers
|
||||
make test
|
||||
# Generate controller image
|
||||
make docker-build
|
||||
# We run with sudo to be able to access /dev/kvm
|
||||
sudo -E ./scripts/e2e-tests.sh
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: ${{ matrix.label }}-test.logs.zip
|
||||
path: tests/**/logs/*
|
||||
if-no-files-found: warn
|
||||
|
.github/workflows/image.yml (12 lines changed)
@@ -1,4 +1,3 @@
|
||||
---
|
||||
name: 'build container images'
|
||||
|
||||
on:
|
||||
@@ -8,12 +7,17 @@ on:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ci-image-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Prepare
|
||||
id: prep
|
||||
@@ -46,14 +50,14 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_USERNAME }}
|
||||
password: ${{ secrets.QUAY_PASSWORD }}
|
||||
|
||||
- name: Build
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
builder: ${{ steps.buildx.outputs.name }}
|
||||
context: .
|
||||
|
.github/workflows/lint.yml (17 lines changed)
@@ -6,6 +6,12 @@ on:
|
||||
pull_request:
|
||||
paths:
|
||||
- '**'
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ci-lint-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
jobs:
|
||||
@@ -13,18 +19,15 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ^1.20
|
||||
uses: actions/setup-go@v5
|
||||
- name: Install earthly
|
||||
uses: Luet-lab/luet-install-action@v1
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
repository: quay.io/kairos/packages
|
||||
packages: utils/earthly
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Run Lint checks
|
||||
run: |
|
||||
earthly +lint
|
||||
|
.github/workflows/osv-scanner-pr.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
name: OSV-Scanner PR Scan

# Change "main" to your default branch if you use a different name, e.g. "master"
on:
  pull_request:
  push:
    branches:
      - main
  merge_group:
    branches: [main]

permissions:
  # Require writing security events to upload SARIF file to security tab
  security-events: write
  # Only need to read contents and actions
  contents: read
  actions: read

jobs:
  scan-pr:
    uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v1.9.2"
.github/workflows/release.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
name: goreleaser

on:
  push:
    tags:
      - 'v*'

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: |
          git fetch --prune --unshallow
      - name: Install gcc for arm64
        run: sudo apt-get update && sudo apt-get install -y build-essential crossbuild-essential-arm64
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/renovate_auto.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
name: Renovate auto-merge
|
||||
on:
|
||||
- pull_request_target
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
packages: read
|
||||
|
||||
jobs:
|
||||
dependabot:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'renovate[bot]' }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Approve a PR if not already approved
|
||||
run: |
|
||||
gh pr checkout "$PR_URL"
|
||||
if [ "$(gh pr status --json reviewDecision -q .currentBranch.reviewDecision)" != "APPROVED" ];
|
||||
then
|
||||
gh pr review --approve "$PR_URL"
|
||||
else
|
||||
echo "PR already approved.";
|
||||
fi
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Enable auto-merge for Renovate PRs
|
||||
run: gh pr merge --auto --squash "$PR_URL"
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
.github/workflows/secscan.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
name: "Security Scan"

# Run workflow each time code is pushed to your repository and on a schedule.
# The scheduled workflow runs every Sunday at 00:00 UTC.
on:
  push:
    branches:
      - main
  pull_request:
    paths:
      - '**'
  schedule:
    - cron: '0 0 * * 0'

jobs:
  tests:
    runs-on: ubuntu-latest
    env:
      GO111MODULE: on
    steps:
      - name: Checkout Source
        uses: actions/checkout@v4
      - name: Run Gosec Security Scanner
        uses: securego/gosec@master
        with:
          # we let the report content trigger a failure using the GitHub Security features.
          args: '-no-fail -fmt sarif -out results.sarif ./...'
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          # Path to SARIF file relative to the root of the repository
          sarif_file: results.sarif
.github/workflows/unit-tests.yml (24 lines changed)
@@ -1,19 +1,35 @@
|
||||
---
|
||||
name: Unit tests
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
concurrency:
|
||||
group: ci-unit-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
unit-tests:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ["1.24-bookworm"]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Run tests
|
||||
run: |
|
||||
./earthly.sh +test
|
||||
earthly +test --GO_VERSION=${{ matrix.go-version }}
|
||||
- name: Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
file: ./coverage.out
|
||||
|
.gitignore (2 lines changed)
@@ -24,3 +24,5 @@ testbin/*
*~

/helm-chart
build/
dist/
.goreleaser.yaml (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
# Make sure to check the documentation at http://goreleaser.com
|
||||
version: 2
|
||||
project_name: kcrypt-discovery-challenger
|
||||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: default
|
||||
main: ./cmd/discovery/main.go
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- GOEXPERIMENT=boringcrypto
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: fips-amd64
|
||||
main: ./cmd/discovery/main.go
|
||||
hooks:
|
||||
post:
|
||||
- bash -c 'set -e; go version {{.Path}} | grep boringcrypto || (echo "boringcrypto not found" && exit 1)'
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- GOEXPERIMENT=boringcrypto
|
||||
- CC=aarch64-linux-gnu-gcc
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- arm64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: fips-arm64
|
||||
main: ./cmd/discovery/main.go
|
||||
hooks:
|
||||
post:
|
||||
- bash -c 'set -e; go version {{.Path}} | grep boringcrypto || (echo "boringcrypto not found" && exit 1)'
|
||||
source:
|
||||
enabled: true
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
|
||||
archives:
|
||||
- id: default-archive
|
||||
ids:
|
||||
- default
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}-{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
|
||||
- id: fips-archive
|
||||
ids:
|
||||
- fips-arm64
|
||||
- fips-amd64
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}-{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}-fips'
|
||||
checksum:
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ .Tag }}-next"
|
||||
changelog:
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- '^docs:'
|
||||
- '^test:'
|
||||
- '^Merge pull request'
|
||||
env:
|
||||
- GOSUMDB=sum.golang.org
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
Dockerfile

@@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.20 as builder
FROM golang:1.24 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
@@ -16,7 +16,7 @@ COPY pkg/ pkg/
COPY controllers/ controllers/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
RUN CGO_ENABLED=0 go build -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
Earthfile (90 lines changed)
@@ -1,80 +1,74 @@
|
||||
VERSION 0.6
|
||||
ARG BASE_IMAGE=quay.io/kairos/core-ubuntu:latest
|
||||
|
||||
# renovate: datasource=github-releases depName=kairos-io/kairos
|
||||
ARG KAIROS_VERSION="v2.5.0"
|
||||
ARG BASE_IMAGE=quay.io/kairos/ubuntu:23.10-core-amd64-generic-$KAIROS_VERSION
|
||||
|
||||
ARG OSBUILDER_IMAGE=quay.io/kairos/osbuilder-tools
|
||||
# renovate: datasource=docker depName=golang
|
||||
ARG GO_VERSION=1.20
|
||||
ARG GO_VERSION=1.24-bookworm
|
||||
ARG LUET_VERSION=0.33.0
|
||||
|
||||
build-challenger:
|
||||
FROM golang:alpine
|
||||
FROM +go-deps
|
||||
COPY . /work
|
||||
WORKDIR /work
|
||||
RUN CGO_ENABLED=0 go build -o kcrypt-discovery-challenger ./cmd/discovery
|
||||
SAVE ARTIFACT /work/kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
|
||||
SAVE ARTIFACT /work/kcrypt-discovery-challenger kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
|
||||
|
||||
image:
|
||||
FROM github.com/Itxaka/kairos:drop_kcrypt_dracut+image
|
||||
FROM $BASE_IMAGE
|
||||
ARG IMAGE
|
||||
RUN cat /etc/os-release
|
||||
COPY +build-challenger/kcrypt-discovery-challenger /system/discovery/kcrypt-discovery-challenger
|
||||
SAVE IMAGE $IMAGE
|
||||
|
||||
image-rootfs:
|
||||
FROM +image
|
||||
SAVE ARTIFACT --keep-own /. rootfs
|
||||
|
||||
grub-files:
|
||||
FROM alpine
|
||||
RUN apk add wget
|
||||
RUN wget https://raw.githubusercontent.com/c3os-io/c3os/master/overlay/files-iso/boot/grub2/grub.cfg -O grub.cfg
|
||||
SAVE ARTIFACT --keep-own grub.cfg grub.cfg
|
||||
FROM +image
|
||||
SAVE ARTIFACT --keep-own /. rootfs
|
||||
|
||||
iso:
|
||||
ARG OSBUILDER_IMAGE
|
||||
ARG ISO_NAME=challenger
|
||||
FROM $OSBUILDER_IMAGE
|
||||
WORKDIR /build
|
||||
COPY --keep-own +grub-files/grub.cfg /build/files-iso/boot/grub2/grub.cfg
|
||||
COPY --keep-own +image-rootfs/rootfs /build/rootfs
|
||||
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --squash-no-compression --date=false --local --overlay-iso /build/files-iso --output /build/ dir:/build/rootfs
|
||||
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --squash-no-compression --date=false --output /build/ dir:/build/rootfs
|
||||
SAVE ARTIFACT /build/$ISO_NAME.iso kairos.iso AS LOCAL build/$ISO_NAME.iso
|
||||
SAVE ARTIFACT /build/$ISO_NAME.iso.sha256 kairos.iso.sha256 AS LOCAL build/$ISO_NAME.iso.sha256
|
||||
|
||||
test:
|
||||
go-deps:
|
||||
ARG GO_VERSION
|
||||
FROM golang:$GO_VERSION
|
||||
ENV CGO_ENABLED=0
|
||||
WORKDIR /build
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
RUN go mod verify
|
||||
SAVE ARTIFACT go.mod AS LOCAL go.mod
|
||||
SAVE ARTIFACT go.sum AS LOCAL go.sum
|
||||
|
||||
test:
|
||||
FROM +go-deps
|
||||
ENV CGO_ENABLED=0
|
||||
WORKDIR /work
|
||||
|
||||
# Cache layer for modules
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download && go mod verify
|
||||
|
||||
RUN go get github.com/onsi/gomega/...
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.1.4
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.1.4
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.1.4
|
||||
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
COPY . /work
|
||||
RUN PATH=$PATH:$GOPATH/bin ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
|
||||
COPY . .
|
||||
RUN go run github.com/onsi/ginkgo/v2/ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
|
||||
SAVE ARTIFACT coverage.out AS LOCAL coverage.out
|
||||
|
||||
# Generic targets
|
||||
# usage e.g. ./earthly.sh +datasource-iso --CLOUD_CONFIG=tests/assets/qrcode.yaml
|
||||
datasource-iso:
|
||||
ARG OSBUILDER_IMAGE
|
||||
ARG CLOUD_CONFIG
|
||||
FROM $OSBUILDER_IMAGE
|
||||
RUN zypper in -y mkisofs
|
||||
WORKDIR /build
|
||||
RUN touch meta-data
|
||||
ARG OSBUILDER_IMAGE
|
||||
ARG CLOUD_CONFIG
|
||||
FROM $OSBUILDER_IMAGE
|
||||
RUN zypper in -y mkisofs
|
||||
WORKDIR /build
|
||||
RUN touch meta-data
|
||||
|
||||
COPY ${CLOUD_CONFIG} user-data
|
||||
RUN cat user-data
|
||||
RUN mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
|
||||
SAVE ARTIFACT /build/ci.iso iso.iso AS LOCAL build/datasource.iso
|
||||
COPY ${CLOUD_CONFIG} user-data
|
||||
RUN cat user-data
|
||||
RUN mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
|
||||
SAVE ARTIFACT /build/ci.iso iso.iso AS LOCAL build/datasource.iso
|
||||
|
||||
luet:
|
||||
FROM quay.io/luet/base:$LUET_VERSION
|
||||
@@ -82,18 +76,12 @@ luet:
|
||||
|
||||
e2e-tests-image:
|
||||
FROM opensuse/tumbleweed
|
||||
RUN zypper in -y go git qemu-x86 qemu-arm qemu-tools swtpm docker jq docker-compose make glibc libopenssl-devel curl gettext-runtime
|
||||
RUN zypper in -y go1.23 git qemu-x86 qemu-arm qemu-tools swtpm docker jq docker-compose make glibc libopenssl-devel curl gettext-runtime awk envsubst
|
||||
ENV GOPATH="/go"
|
||||
|
||||
COPY . /test
|
||||
WORKDIR /test
|
||||
|
||||
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
|
||||
RUN go get github.com/onsi/gomega/...
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.7.1
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.7.1
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.7.1
|
||||
|
||||
IF [ -e /test/build/kairos.iso ]
|
||||
ENV ISO=/test/build/kairos.iso
|
||||
ELSE
|
||||
@@ -106,11 +94,15 @@ e2e-tests-image:
|
||||
RUN luet repo add -y kairos --url quay.io/kairos/packages --type docker
|
||||
RUN LUET_NOLOCK=true luet install -y container/kubectl utils/k3d
|
||||
|
||||
controller-latest:
|
||||
FROM DOCKERFILE .
|
||||
SAVE IMAGE controller:latest
|
||||
|
||||
e2e-tests:
|
||||
FROM +e2e-tests-image
|
||||
ARG LABEL
|
||||
|
||||
WITH DOCKER --allow-privileged
|
||||
RUN make test # This also generates the latest controllers automatically, we do that before building the docker image with them
|
||||
WITH DOCKER --allow-privileged --load controller:latest=+controller-latest
|
||||
RUN ./scripts/e2e-tests.sh
|
||||
END
|
||||
|
||||
|
Makefile (2 lines changed)
@@ -160,7 +160,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest

## Tool Versions
KUSTOMIZE_VERSION ?= v3.8.7
CONTROLLER_TOOLS_VERSION ?= v0.9.2
CONTROLLER_TOOLS_VERSION ?= v0.14.0

KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
.PHONY: kustomize
@@ -16,6 +16,9 @@ import (
|
||||
"github.com/mudler/yip/pkg/utils"
|
||||
)
|
||||
|
||||
// Because of how go-pluggable works, we can't just print to stdout
|
||||
const LOGFILE = "/tmp/kcrypt-challenger-client.log"
|
||||
|
||||
var errPartNotFound error = fmt.Errorf("pass for partition not found")
|
||||
var errBadCertificate error = fmt.Errorf("unknown certificate")
|
||||
|
||||
@@ -30,6 +33,10 @@ func NewClient() (*Client, error) {
|
||||
|
||||
// ❯ echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
|
||||
func (c *Client) Start() error {
|
||||
if err := os.RemoveAll(LOGFILE); err != nil { // Start fresh
|
||||
return fmt.Errorf("removing the logfile: %w", err)
|
||||
}
|
||||
|
||||
factory := pluggable.NewPluginFactory()
|
||||
|
||||
// Input: bus.EventInstallPayload
|
||||
@@ -59,7 +66,7 @@ func (c *Client) Start() error {
|
||||
return factory.Run(pluggable.EventType(os.Args[1]), os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
|
||||
func (c *Client) generatePass(postEndpoint string, headers map[string]string, p *block.Partition) error {
|
||||
|
||||
rand := utils.RandomString(32)
|
||||
pass, err := tpm.EncryptBlob([]byte(rand))
|
||||
@@ -71,10 +78,14 @@ func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
|
||||
opts := []tpm.Option{
|
||||
tpm.WithCAs([]byte(c.Config.Kcrypt.Challenger.Certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
tpm.WithAdditionalHeader("label", p.Label),
|
||||
tpm.WithAdditionalHeader("label", p.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", p.Name),
|
||||
tpm.WithAdditionalHeader("uuid", p.UUID),
|
||||
}
|
||||
for k, v := range headers {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
|
||||
conn, err := tpm.Connection(postEndpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -84,20 +95,27 @@ func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
|
||||
}
|
||||
|
||||
func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err error) {
|
||||
// IF we don't have any server configured, just do local
|
||||
if c.Config.Kcrypt.Challenger.Server == "" {
|
||||
additionalHeaders := map[string]string{}
|
||||
serverURL := c.Config.Kcrypt.Challenger.Server
|
||||
|
||||
// If we don't have any server configured, just do local
|
||||
if serverURL == "" {
|
||||
return localPass(c.Config)
|
||||
}
|
||||
|
||||
challengeEndpoint := fmt.Sprintf("%s/getPass", c.Config.Kcrypt.Challenger.Server)
|
||||
postEndpoint := fmt.Sprintf("%s/postPass", c.Config.Kcrypt.Challenger.Server)
|
||||
if c.Config.Kcrypt.Challenger.MDNS {
|
||||
serverURL, additionalHeaders, err = queryMDNS(serverURL)
|
||||
}
|
||||
|
||||
getEndpoint := fmt.Sprintf("%s/getPass", serverURL)
|
||||
postEndpoint := fmt.Sprintf("%s/postPass", serverURL)
|
||||
|
||||
for tries := 0; tries < attempts; tries++ {
|
||||
var generated bool
|
||||
pass, generated, err = getPass(challengeEndpoint, c.Config.Kcrypt.Challenger.Certificate, p)
|
||||
pass, generated, err = getPass(getEndpoint, additionalHeaders, c.Config.Kcrypt.Challenger.Certificate, p)
|
||||
if err == errPartNotFound {
|
||||
// IF server doesn't have a pass for us, then we generate one and we set it
|
||||
err = c.generatePass(postEndpoint, p)
|
||||
err = c.generatePass(postEndpoint, additionalHeaders, p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -118,7 +136,7 @@ func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err er
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Failed with error: %s . Will retry.\n", err.Error())
|
||||
logToFile("Failed with error: %s . Will retry.\n", err.Error())
|
||||
time.Sleep(1 * time.Second) // network errors? retry
|
||||
}
|
||||
|
||||
@@ -145,3 +163,14 @@ func (c *Client) decryptPassphrase(pass string) (string, error) {
|
||||
|
||||
return string(passBytes), err
|
||||
}
|
||||
|
||||
func logToFile(format string, a ...any) {
|
||||
s := fmt.Sprintf(format, a...)
|
||||
file, err := os.OpenFile(LOGFILE, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString(s)
|
||||
}
|
||||
|
@@ -1,8 +1,9 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"github.com/kairos-io/kairos/pkg/config"
|
||||
"github.com/kairos-io/kairos-sdk/collector"
|
||||
kconfig "github.com/kairos-io/kcrypt/pkg/config"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
@@ -12,11 +13,10 @@ type Client struct {
|
||||
type Config struct {
|
||||
Kcrypt struct {
|
||||
Challenger struct {
|
||||
Server string `yaml:"challenger_server,omitempty"`
|
||||
// Non-volatile index memory: where we store the encrypted passphrase (offline mode)
|
||||
NVIndex string `yaml:"nv_index,omitempty"`
|
||||
// Certificate index: this is where the rsa pair that decrypts the passphrase lives
|
||||
CIndex string `yaml:"c_index,omitempty"`
|
||||
MDNS bool `yaml:"mdns,omitempty"`
|
||||
Server string `yaml:"challenger_server,omitempty"`
|
||||
NVIndex string `yaml:"nv_index,omitempty"` // Non-volatile index memory: where we store the encrypted passphrase (offline mode)
|
||||
CIndex string `yaml:"c_index,omitempty"` // Certificate index: this is where the rsa pair that decrypts the passphrase lives
|
||||
TPMDevice string `yaml:"tpm_device,omitempty"`
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
}
|
||||
@@ -26,12 +26,21 @@ type Config struct {
|
||||
func unmarshalConfig() (Config, error) {
|
||||
var result Config
|
||||
|
||||
c, err := config.Scan(config.Directories(kconfig.ConfigScanDirs...), config.NoLogs)
|
||||
o := &collector.Options{NoLogs: true, MergeBootCMDLine: false}
|
||||
if err := o.Apply(collector.Directories(append(kconfig.ConfigScanDirs, "/tmp/oem")...)); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
c, err := collector.Scan(o, func(d []byte) ([]byte, error) {
|
||||
return d, nil
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
if err = c.Unmarshal(&result); err != nil {
|
||||
a, _ := c.String()
|
||||
err = yaml.Unmarshal([]byte(a), &result)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
|
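The Config struct above is what a node's `kcrypt:` cloud-config block is unmarshalled into. The following is a minimal, hypothetical sketch (not code from this repository; the mirror struct and sample YAML are assumptions for illustration) showing how the yaml tags map user configuration onto those fields:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// challengerConfig mirrors the Config struct shown above; the type name is illustrative.
type challengerConfig struct {
	Kcrypt struct {
		Challenger struct {
			Server      string `yaml:"challenger_server,omitempty"`
			MDNS        bool   `yaml:"mdns,omitempty"`
			NVIndex     string `yaml:"nv_index,omitempty"`
			CIndex      string `yaml:"c_index,omitempty"`
			TPMDevice   string `yaml:"tpm_device,omitempty"`
			Certificate string `yaml:"certificate,omitempty"`
		} `yaml:"challenger"`
	} `yaml:"kcrypt"`
}

func main() {
	// A sample kcrypt block as it might appear in a node's cloud-config.
	raw := []byte(`
kcrypt:
  challenger:
    challenger_server: "http://mychallenger.local"
    mdns: true
`)

	var c challengerConfig
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Kcrypt.Challenger.Server, c.Kcrypt.Challenger.MDNS)
}
```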
@@ -16,13 +16,19 @@ import (
|
||||
|
||||
const DefaultNVIndex = "0x1500000"
|
||||
|
||||
func getPass(server, certificate string, partition *block.Partition) (string, bool, error) {
|
||||
msg, err := tpm.Get(server,
|
||||
func getPass(server string, headers map[string]string, certificate string, partition *block.Partition) (string, bool, error) {
|
||||
opts := []tpm.Option{
|
||||
tpm.WithCAs([]byte(certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
tpm.WithAdditionalHeader("label", partition.Label),
|
||||
tpm.WithAdditionalHeader("label", partition.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", partition.Name),
|
||||
tpm.WithAdditionalHeader("uuid", partition.UUID))
|
||||
tpm.WithAdditionalHeader("uuid", partition.UUID),
|
||||
}
|
||||
for k, v := range headers {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
|
||||
msg, err := tpm.Get(server, opts...)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
@@ -41,7 +47,7 @@ func getPass(server, certificate string, partition *block.Partition) (string, bo
|
||||
if strings.Contains(result.Error, "x509: certificate signed by unknown authority") {
|
||||
return "", false, errBadCertificate
|
||||
}
|
||||
return "", false, fmt.Errorf(result.Error)
|
||||
return "", false, errors.New(result.Error)
|
||||
}
|
||||
|
||||
return "", false, errPartNotFound
|
||||
|
cmd/discovery/client/mdns.go (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/mdns"
|
||||
)
|
||||
|
||||
const (
|
||||
MDNSServiceType = "_kcrypt._tcp"
|
||||
MDNSTimeout = 15 * time.Second
|
||||
)
|
||||
|
||||
// queryMDNS will make an mdns query on local network to find a kcrypt challenger server
|
||||
// instance. If none is found, the original URL is returned and no additional headers.
|
||||
// If a response is received, the IP address and port from the response are returned,
// along with an additional "Host" header pointing to the original host.
|
||||
func queryMDNS(originalURL string) (string, map[string]string, error) {
|
||||
additionalHeaders := map[string]string{}
|
||||
var err error
|
||||
|
||||
parsedURL, err := url.Parse(originalURL)
|
||||
if err != nil {
|
||||
return originalURL, additionalHeaders, fmt.Errorf("parsing the original host: %w", err)
|
||||
}
|
||||
|
||||
host := parsedURL.Host
|
||||
if !strings.HasSuffix(host, ".local") { // sanity check
|
||||
return "", additionalHeaders, fmt.Errorf("domain should end in \".local\" when using mdns")
|
||||
}
|
||||
|
||||
mdnsIP, mdnsPort := discoverMDNSServer(host)
|
||||
if mdnsIP == "" { // no reply
|
||||
logToFile("no reply from mdns\n")
|
||||
return originalURL, additionalHeaders, nil
|
||||
}
|
||||
|
||||
additionalHeaders["Host"] = parsedURL.Host
|
||||
newURL := strings.ReplaceAll(originalURL, host, mdnsIP)
|
||||
// Remove any port in the original url
|
||||
if port := parsedURL.Port(); port != "" {
|
||||
newURL = strings.ReplaceAll(newURL, port, "")
|
||||
}
|
||||
|
||||
// Add any possible port from the mdns response
|
||||
if mdnsPort != "" {
|
||||
newURL = strings.ReplaceAll(newURL, mdnsIP, fmt.Sprintf("%s:%s", mdnsIP, mdnsPort))
|
||||
}
|
||||
|
||||
return newURL, additionalHeaders, nil
|
||||
}
|
||||
|
||||
// discoverMDNSServer performs an mDNS query to discover any running kcrypt challenger
|
||||
// servers on the same network that matches the given hostname.
|
||||
// If a response is received, the IP address and the port from the response are returned.
|
||||
func discoverMDNSServer(hostname string) (string, string) {
|
||||
// Make a channel for results and start listening
|
||||
entriesCh := make(chan *mdns.ServiceEntry, 4)
|
||||
defer close(entriesCh)
|
||||
|
||||
logToFile("Will now wait for some mdns server to respond\n")
|
||||
// Start the lookup. It will block until we read from the chan.
|
||||
mdns.Lookup(MDNSServiceType, entriesCh)
|
||||
|
||||
expectedHost := hostname + "." // FQDN
|
||||
// Wait until a matching server is found or we reach a timeout
|
||||
for {
|
||||
select {
|
||||
case entry := <-entriesCh:
|
||||
logToFile("mdns response received\n")
|
||||
if entry.Host == expectedHost {
|
||||
logToFile("%s matches %s\n", entry.Host, expectedHost)
|
||||
return entry.AddrV4.String(), strconv.Itoa(entry.Port) // TODO: v6?
|
||||
} else {
|
||||
logToFile("%s didn't match %s\n", entry.Host, expectedHost)
|
||||
}
|
||||
case <-time.After(MDNSTimeout):
|
||||
logToFile("timed out waiting for mdns\n")
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
}
|
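As a rough usage sketch of the discovery flow above (an assumption, not part of the change set): browse for the `_kcrypt._tcp` service with the same hashicorp/mdns library, then rewrite a `.local` challenger URL to the advertised IP and port while keeping the original hostname as a `Host` header, which is what queryMDNS does for the client:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/mdns"
)

func main() {
	// Browse for the service type the challenger server advertises (MDNSServiceType above).
	entries := make(chan *mdns.ServiceEntry, 4)
	go func() {
		_ = mdns.Lookup("_kcrypt._tcp", entries)
	}()

	select {
	case e := <-entries:
		// Rewrite e.g. http://mychallenger.local to the advertised IP:port and keep
		// the original hostname as a "Host" header, as queryMDNS does.
		rewritten := fmt.Sprintf("http://%s:%d", e.AddrV4, e.Port)
		headers := map[string]string{"Host": "mychallenger.local"}
		fmt.Println("challenger reachable at", rewritten, "headers:", headers)
	case <-time.After(15 * time.Second):
		fmt.Println("no kcrypt challenger advertised on this network")
	}
}
```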
@@ -20,14 +20,13 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
@@ -44,10 +43,7 @@ var testEnv *envtest.Environment
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"Controller Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
RunSpecs(t, "Control")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
|
earthly.sh

@@ -1,3 +1,3 @@
#!/bin/bash

docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.6.21 --allow-privileged $@
docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.8.15 --allow-privileged $@
go.mod (214 lines changed)
@@ -1,144 +1,182 @@
|
||||
module github.com/kairos-io/kairos-challenger
|
||||
|
||||
go 1.20
|
||||
go 1.24.2
|
||||
|
||||
require (
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/jaypipes/ghw v0.9.0
|
||||
github.com/kairos-io/kairos v1.24.3-56.0.20230208235509-4d28f3b87f60
|
||||
github.com/kairos-io/kcrypt v0.5.0
|
||||
github.com/kairos-io/tpm-helpers v0.0.0-20230119140150-3fa97128ef6b
|
||||
github.com/go-logr/logr v1.4.2
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hashicorp/mdns v1.0.6
|
||||
github.com/jaypipes/ghw v0.14.0
|
||||
github.com/kairos-io/kairos-sdk v0.7.3
|
||||
github.com/kairos-io/kcrypt v0.13.0
|
||||
github.com/kairos-io/tpm-helpers v0.0.0-20240123063624-f7a3fcc66199
|
||||
github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5
|
||||
github.com/mudler/go-processmanager v0.0.0-20220724164624-c45b5c61312d
|
||||
github.com/mudler/yip v1.0.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/ginkgo/v2 v2.8.1
|
||||
github.com/onsi/gomega v1.26.0
|
||||
github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82
|
||||
github.com/mudler/yip v1.15.0
|
||||
github.com/onsi/ginkgo/v2 v2.23.0
|
||||
github.com/onsi/gomega v1.36.2
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spectrocloud/peg v0.0.0-20230214140930-4d6672f825b2
|
||||
github.com/spectrocloud/peg v0.0.0-20240405075800-c5da7125e30f
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
sigs.k8s.io/controller-runtime v0.12.2
|
||||
k8s.io/api v0.27.2
|
||||
k8s.io/apimachinery v0.27.2
|
||||
k8s.io/client-go v0.27.2
|
||||
sigs.k8s.io/controller-runtime v0.15.0
|
||||
)
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.1.1 // indirect
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
cloud.google.com/go v0.93.3 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
atomicgo.dev/schedule v0.1.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.9 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/avast/retry-go v3.0.0+incompatible // indirect
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bramvdbogaerde/go-scp v1.2.1 // indirect
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 // indirect
|
||||
github.com/codingsince1985/checksum v1.2.6 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/containerd/cgroups/v3 v3.0.5 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/containerd v1.7.25 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/denisbrodbeck/machineid v1.0.1 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v27.5.0+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v27.5.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/folbricht/tpmk v0.1.2-0.20230104073416-f20b20c289d7 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/zapr v1.2.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.4 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gofrs/uuid v4.4.0+incompatible // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/certificate-transparency-go v1.1.4 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-containerregistry v0.20.3 // indirect
|
||||
github.com/google/go-tpm v0.3.3 // indirect
|
||||
github.com/google/go-tpm-tools v0.3.10 // indirect
|
||||
github.com/google/go-tspi v0.3.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/gookit/color v1.5.2 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/ipfs/go-log v1.0.5 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.5.1 // indirect
|
||||
github.com/itchyny/gojq v0.12.11 // indirect
|
||||
github.com/itchyny/timefmt-go v0.1.5 // indirect
|
||||
github.com/itchyny/gojq v0.12.17 // indirect
|
||||
github.com/itchyny/timefmt-go v0.1.6 // indirect
|
||||
github.com/joho/godotenv v1.5.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.5 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/miekg/dns v1.1.55 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect
|
||||
github.com/prometheus/client_golang v1.13.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/pterm/pterm v0.12.54 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.20.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/pterm/pterm v0.12.80 // indirect
|
||||
github.com/qeesung/image2ascii v1.0.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.3 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/testify v1.8.1 // indirect
|
||||
github.com/twpayne/go-vfs v1.7.2 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rs/zerolog v1.33.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.24.7 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twpayne/go-vfs/v4 v4.3.0 // indirect
|
||||
github.com/vbatts/tar-split v0.11.6 // indirect
|
||||
github.com/wayneashleyberry/terminal-dimensions v1.1.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/net v0.6.0 // indirect
|
||||
golang.org/x/oauth2 v0.4.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/term v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
golang.org/x/crypto v0.33.0 // indirect
|
||||
golang.org/x/mod v0.23.0 // indirect
|
||||
golang.org/x/net v0.35.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/term v0.29.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.30.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250212204824-5a70512c5d8b // indirect
|
||||
google.golang.org/grpc v1.70.0 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
howett.net/plist v1.0.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.24.2 // indirect
|
||||
k8s.io/component-base v0.24.2 // indirect
|
||||
k8s.io/klog/v2 v2.80.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.27.2 // indirect
|
||||
k8s.io/component-base v0.27.2 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
main.go (5 lines changed)
@@ -23,6 +23,7 @@ import (

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.

	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth"

@@ -120,7 +121,9 @@ func main() {
		os.Exit(1)
	}

	go challenger.Start(context.Background(), clientset, reconciler, namespace, challengerAddr)
	serverLog := ctrl.Log.WithName("server")

	go challenger.Start(context.Background(), serverLog, clientset, reconciler, namespace, challengerAddr)

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
mdns-notes.md (new file, 103 lines)
@@ -0,0 +1,103 @@
# Prerequisites

Nodes and KMS should be on the same local network (an mdns requirement).

# Steps

- Create a cluster with a port bound to the host:

```
k3d cluster create kcrypt -p '30000:30000@server:0'
```

(We are going to assign this port to the kcrypt challenger server and advertise it over mdns.)

- Follow [the instructions to set up the kcrypt challenger server](https://github.com/kairos-io/kcrypt-challenger#installation):

```
helm repo add kairos https://kairos-io.github.io/helm-charts
helm install kairos-crd kairos/kairos-crds
```

Create the following `kcrypt-challenger-values.yaml` file:

```yaml
service:
  challenger:
    type: "NodePort"
    port: 8082
    nodePort: 30000
```

and deploy the challenger server with it:

```bash
helm install -f kcrypt-challenger-values.yaml kairos-challenger kairos/kairos-challenger
```

- Add the SealedVolume and Secret for the TPM chip:

```
apiVersion: v1
kind: Secret
metadata:
  name: example-host-tpm-secret
  namespace: default
type: Opaque
stringData:
  pass: "awesome-passphrase"
---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
  name: example-host
  namespace: default
spec:
  TPMHash: "5640e37f4016da16b841a93880dcc44886904392fa3c86681087b77db5afedbe"
  partitions:
    - label: COS_PERSISTENT
      secret:
        name: example-host-tpm-secret
        path: pass
  quarantined: false
```

- Start the [simple-mdns-server](https://github.com/kairos-io/simple-mdns-server):

```
go run . --port 30000 --interfaceName enp121s0 --serviceType _kcrypt._tcp --hostName mychallenger.local
```

- Start a node in manual install mode

- Replace `/system/discovery/kcrypt-discovery-challenger` with a custom build (until we merge)

- Create the following config:

```
#cloud-config

users:
  - name: kairos
    passwd: kairos

install:
  grub_options:
    extra_cmdline: "rd.neednet=1"
  encrypted_partitions:
    - COS_PERSISTENT

# Kcrypt configuration block
kcrypt:
  challenger:
    mdns: true
    challenger_server: "http://mychallenger.local"
```

- Install:

```
kairos-agent manual-install --device auto config.yaml
```
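Before booting the node, it can help to confirm that the advertisement is actually visible on the local network. The snippet below is a hypothetical check (not part of the original notes); it browses `_kcrypt._tcp` with the hashicorp/mdns library the client already uses and verifies that the advertised host matches the `challenger_server` hostname configured above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/mdns"
)

func main() {
	// The hostname configured as challenger_server above, in the FQDN form mDNS reports.
	const wantHost = "mychallenger.local."

	entries := make(chan *mdns.ServiceEntry, 4)
	go func() { _ = mdns.Lookup("_kcrypt._tcp", entries) }()

	for {
		select {
		case e := <-entries:
			if e.Host == wantHost {
				fmt.Printf("found %s at %s:%d\n", e.Host, e.AddrV4, e.Port)
				return
			}
			fmt.Printf("ignoring %s (looking for %s)\n", e.Host, wantHost)
		case <-time.After(10 * time.Second):
			fmt.Println("no matching advertisement; check the simple-mdns-server flags")
			return
		}
	}
}
```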
@@ -4,12 +4,13 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/payload"
|
||||
@@ -97,8 +98,8 @@ func getPubHash(token string) (string, error) {
return tpm.DecodePubHash(ek)
}

func Start(ctx context.Context, kclient *kubernetes.Clientset, reconciler *controllers.SealedVolumeReconciler, namespace, address string) {
fmt.Println("Challenger started at", address)
func Start(ctx context.Context, logger logr.Logger, kclient *kubernetes.Clientset, reconciler *controllers.SealedVolumeReconciler, namespace, address string) {
logger.Info("Challenger started", "address", address)
s := http.Server{
Addr: address,
ReadTimeout: 10 * time.Second,
@@ -107,189 +108,214 @@ func Start(ctx context.Context, kclient *kubernetes.Clientset, reconciler *contr

m := http.NewServeMux()

errorMessage := func(writer io.WriteCloser, errMsg string) {
err := json.NewEncoder(writer).Encode(payload.Data{Error: errMsg})
if err != nil {
fmt.Println("error encoding the response to json", err.Error())
}
fmt.Println(errMsg)
}

m.HandleFunc("/postPass", func(w http.ResponseWriter, r *http.Request) {
conn, _ := upgrader.Upgrade(w, r, nil) // error ignored for sake of simplicity
for {

fmt.Println("Receiving passphrase")
if err := tpm.AuthRequest(r, conn); err != nil {
fmt.Println("error", err.Error())
return
}
defer conn.Close()
fmt.Println("[Receiving passphrase] auth succeeded")

token := r.Header.Get("Authorization")

hashEncoded, err := getPubHash(token)
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
logger.Error(err, "upgrading connection")
return
}
defer func() {
err := conn.Close()
if err != nil {
fmt.Println("error decoding pubhash", err.Error())
return
logger.Error(err, "closing the connection")
}
fmt.Println("[Receiving passphrase] pubhash", hashEncoded)
}()

label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
v := &payload.Data{}
logger.Info("Receiving passphrase")
if err := tpm.AuthRequest(r, conn); err != nil {
errorMessage(conn, logger, err, "auth request")
return
}
logger.Info("[Receiving passphrase] auth succeeded")

volumeList := &keyserverv1alpha1.SealedVolumeList{}
token := r.Header.Get("Authorization")
hashEncoded, err := getPubHash(token)
if err != nil {
errorMessage(conn, logger, err, "decoding pubhash")
return
}
logger.Info("[Receiving passphrase] pubhash", "encodedhash", hashEncoded)

label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")
v := &payload.Data{}
logger.Info("Reading request data", "label", label, "name", name, "uuid", uuid)

volumeList := &keyserverv1alpha1.SealedVolumeList{}
for {
if err := reconciler.List(ctx, volumeList, &client.ListOptions{Namespace: namespace}); err != nil {
fmt.Println("Failed listing volumes")
fmt.Println(err)
logger.Error(err, "listing volumes")
continue
}

sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)

if sealedVolumeData == nil {
fmt.Println("No TPM Hash found for", hashEncoded)
conn.Close()
return
}

if err := conn.ReadJSON(v); err != nil {
fmt.Println("error", err.Error())
return
}

if v.HasPassphrase() && !v.HasError() {
secretName, secretPath := sealedVolumeData.DefaultSecret()
_, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
fmt.Printf("Failed getting secret: %s\n", err.Error())
continue
}

secret := corev1.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "apps/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
StringData: map[string]string{
secretPath: v.Passphrase,
constants.GeneratedByKey: v.GeneratedBy,
},
Type: "Opaque",
}
_, err := kclient.CoreV1().Secrets(namespace).Create(ctx, &secret, v1.CreateOptions{})
if err != nil {
fmt.Println("failed during secret creation:", err.Error())
}
} else {
fmt.Println("Posted for already existing secret - ignoring")
}
} else {
fmt.Println("Invalid answer from client: doesn't contain any passphrase")
}
break
}

logger.Info("Looking up volume with request data")
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)

if sealedVolumeData == nil {
errorMessage(conn, logger, fmt.Errorf("no TPM Hash found for %s", hashEncoded), "")
return
}
logger.Info("[Looking up volume with request data] succeeded")

if err := conn.ReadJSON(v); err != nil {
logger.Error(err, "reading json from connection")
return
}

if !v.HasPassphrase() {
errorMessage(conn, logger, fmt.Errorf("invalid answer from client: doesn't contain any passphrase"), "")
}
if v.HasError() {
errorMessage(conn, logger, fmt.Errorf("error: %s", v.Error), v.Error)
}

secretName, secretPath := sealedVolumeData.DefaultSecret()
logger.Info("Looking up secret in with name", "name", secretName, "namespace", namespace)
_, err = kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err == nil {
logger.Info("Posted for already existing secret - ignoring")
return
}
if !apierrors.IsNotFound(err) {
errorMessage(conn, logger, err, "failed getting secret")
return
}

logger.Info("secret not found, creating one")
secret := corev1.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "apps/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
StringData: map[string]string{
secretPath: v.Passphrase,
constants.GeneratedByKey: v.GeneratedBy,
},
Type: "Opaque",
}
_, err = kclient.CoreV1().Secrets(namespace).Create(ctx, &secret, v1.CreateOptions{})
if err != nil {
errorMessage(conn, logger, err, "failed during secret creation")
}
logger.Info("created new secret")
})

m.HandleFunc("/getPass", func(w http.ResponseWriter, r *http.Request) {
conn, _ := upgrader.Upgrade(w, r, nil) // error ignored for sake of simplicity
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
logger.Error(err, "upgrading connection")
return
}
defer func() {
err := conn.Close()
if err != nil {
logger.Error(err, "closing the connection")
}
}()

logger.Info("Received connection")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
for {
fmt.Println("Received connection")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
if err := reconciler.List(ctx, volumeList, &client.ListOptions{Namespace: namespace}); err != nil {
fmt.Println("Failed listing volumes")
fmt.Println(err)
logger.Error(err, "listing volumes")
continue
}

token := r.Header.Get("Authorization")
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")

if err := tpm.AuthRequest(r, conn); err != nil {
fmt.Println("error validating challenge", err.Error())
return
}

hashEncoded, err := getPubHash(token)
if err != nil {
fmt.Println("error decoding pubhash", err.Error())
return
}

sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)

if sealedVolumeData == nil {
writer, _ := conn.NextWriter(websocket.BinaryMessage)
errorMessage(writer, fmt.Sprintf("Invalid hash: %s", hashEncoded))
conn.Close()
return
}

writer, _ := conn.NextWriter(websocket.BinaryMessage)
if !sealedVolumeData.Quarantined {
secretName, secretPath := sealedVolumeData.DefaultSecret()

// 1. The admin sets a specific cleartext password from Kube manager
// SealedVolume -> with a secret .
// 2. The admin just adds a SealedVolume associated with a TPM Hash ( you don't provide any passphrase )
// 3. There is no challenger server at all (offline mode)
//
secret, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err == nil {
passphrase := secret.Data[secretPath]
generatedBy := secret.Data[constants.GeneratedByKey]

p := payload.Data{Passphrase: string(passphrase), GeneratedBy: string(generatedBy)}
err = json.NewEncoder(writer).Encode(p)
if err != nil {
fmt.Println("error encoding the passphrase to json", err.Error(), string(passphrase))
}
if err = writer.Close(); err != nil {
fmt.Println("error closing the writer", err.Error())
return
}
if err = conn.Close(); err != nil {
fmt.Println("error closing the connection", err.Error())
return
}

return
} else {
errorMessage(writer, fmt.Sprintf("No secret found for %s and %s", hashEncoded, sealedVolumeData.PartitionLabel))
}
} else {
errorMessage(writer, fmt.Sprintf("quarantined: %s", sealedVolumeData.PartitionLabel))
if err = conn.Close(); err != nil {
fmt.Println("error closing the connection", err.Error())
return
}
return
}
break
}
},
)

s.Handler = m
logger.Info("reading data from request")
token := r.Header.Get("Authorization")
label := r.Header.Get("label")
name := r.Header.Get("name")
uuid := r.Header.Get("uuid")

tokenStr := "empty"
if token != "" {
tokenStr = "not empty"
}
logger.Info("request data", "token", tokenStr, "label", label, "name", name, "uuid", uuid)

if err := tpm.AuthRequest(r, conn); err != nil {
logger.Error(err, "error validating challenge")
return
}

hashEncoded, err := getPubHash(token)
if err != nil {
logger.Error(err, "error decoding pubhash")
return
}

logger.Info("Looking up volume with request data")
sealedVolumeData := findVolumeFor(PassphraseRequestData{
TPMHash: hashEncoded,
Label: label,
DeviceName: name,
UUID: uuid,
}, volumeList)

if sealedVolumeData == nil {
errorMessage(conn, logger, fmt.Errorf("no volume found with data from request and hash: %s", hashEncoded), "")
return
}
logger.Info("[Looking up volume with request data] succeeded")

if sealedVolumeData.Quarantined {
errorMessage(conn, logger, fmt.Errorf("quarantined: %s", sealedVolumeData.PartitionLabel), "")
return
}

secretName, secretPath := sealedVolumeData.DefaultSecret()

// 1. The admin sets a specific cleartext password from Kube manager
// SealedVolume -> with a secret .
// 2. The admin just adds a SealedVolume associated with a TPM Hash ( you don't provide any passphrase )
// 3. There is no challenger server at all (offline mode)
//
logger.Info(fmt.Sprintf("looking up secret %s in namespace %s", secretName, namespace))
secret, err := kclient.CoreV1().Secrets(namespace).Get(ctx, secretName, v1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
errorMessage(conn, logger, fmt.Errorf("No secret found for %s and %s", hashEncoded, sealedVolumeData.PartitionLabel), "")
} else {
errorMessage(conn, logger, err, "getting the secret from Kubernetes")
}

return
}
logger.Info(fmt.Sprintf("secret %s found in namespace %s", secretName, namespace))

passphrase := secret.Data[secretPath]
generatedBy := secret.Data[constants.GeneratedByKey]

writer, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
logger.Error(err, "getting a writer from the connection")
}
p := payload.Data{Passphrase: string(passphrase), GeneratedBy: string(generatedBy)}
err = json.NewEncoder(writer).Encode(p)
if err != nil {
logger.Error(err, "writing passphrase to the websocket channel")
}
if err = writer.Close(); err != nil {
logger.Error(err, "closing the writer")
return
}
})

s.Handler = logRequestHandler(logger, m)

go func() {
err := s.ListenAndServe()
@@ -334,3 +360,36 @@ func findVolumeFor(requestData PassphraseRequestData, volumeList *keyserverv1alp

return nil
}

// errorMessage should be used when an error should be both printed to stdout
// and sent over the wire to the websocket client.
func errorMessage(conn *websocket.Conn, logger logr.Logger, theErr error, description string) {
if theErr == nil {
return
}
logger.Error(theErr, description)

writer, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
logger.Error(err, "getting a writer from the connection")
}

errMsg := theErr.Error()
err = json.NewEncoder(writer).Encode(payload.Data{Error: errMsg})
if err != nil {
logger.Error(err, "error encoding the response to json")
}
err = writer.Close()
if err != nil {
logger.Error(err, "closing the writer")
}
}

func logRequestHandler(logger logr.Logger, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
logger.Info("Incoming request", "method", r.Method, "uri", r.URL.String(),
"referer", r.Header.Get("Referer"), "userAgent", r.Header.Get("User-Agent"))

h.ServeHTTP(w, r)
})
}
renovate.json (new file, 44 lines)
@@ -0,0 +1,44 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:base"
  ],
  "schedule": [
    "after 11pm every weekday",
    "before 7am every weekday",
    "every weekend"
  ],
  "timezone": "Europe/Brussels",
  "rebaseWhen": "behind-base-branch",
  "reviewers": [ "team:maintainers" ],
  "packageRules": [
    {
      "matchUpdateTypes": [
        "patch"
      ],
      "automerge": true
    }
  ],
  "regexManagers": [
    {
      "fileMatch": [
        "^Earthfile$"
      ],
      "matchStrings": [
        "#\\s*renovate:\\s*datasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\sARG\\s+.+_VERSION=(?<currentValue>.*?)\\s"
      ],
      "versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
    },
    {
      "fileMatch": [
        "^earthly\\.(sh|ps1)$"
      ],
      "datasourceTemplate": "docker",
      "depNameTemplate": "earthly/earthly",
      "matchStrings": [
        "earthly\\/earthly:(?<currentValue>.*?)\\s"
      ],
      "versioningTemplate": "semver-coerced"
    }
  ]
}
@@ -34,10 +34,8 @@ trap cleanup EXIT
k3d cluster create "$CLUSTER_NAME" --k3s-arg "--cluster-cidr=10.49.0.1/16@server:0" --k3s-arg "--service-cidr=10.48.0.1/16@server:0" -p '80:80@server:0' -p '443:443@server:0' --image "$K3S_IMAGE"
k3d kubeconfig get "$CLUSTER_NAME" > "$KUBECONFIG"

# Build the docker image
IMG=controller:latest make docker-build

# Import the image to the cluster
# Import the controller image that we built at the start into the cluster.
# This image has to exist and be available in the local docker daemon.
k3d image import -c "$CLUSTER_NAME" controller:latest

# Install cert manager
@@ -59,4 +57,4 @@ kubectl apply -k "$SCRIPT_DIR/../tests/assets/"
# https://stackoverflow.com/a/6752280
export KMS_ADDRESS="10.0.2.2.challenger.sslip.io"

PATH=$PATH:$GOPATH/bin ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/
go run github.com/onsi/ginkgo/v2/ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/
@@ -11,6 +11,7 @@ spec:
  - hosts:
    - 10.0.2.2.challenger.sslip.io
    - ${CLUSTER_IP}.challenger.sslip.io
    - discoverable-kms.local
    secretName: kms-tls
  rules:
  - host: 10.0.2.2.challenger.sslip.io
@@ -33,3 +34,13 @@ spec:
            name: kcrypt-controller-kcrypt-escrow-server
            port:
              number: 8082
  - host: discoverable-kms.local
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: kcrypt-controller-kcrypt-escrow-server
            port:
              number: 8082
@@ -19,13 +19,19 @@ import (

var installationOutput string
var vm VM
var mdnsVM VM

var _ = Describe("kcrypt encryption", func() {
var config string
var vmOpts VMOptions
var expectedInstallationSuccess bool

BeforeEach(func() {
expectedInstallationSuccess = true

vmOpts = DefaultVMOptions()
RegisterFailHandler(printInstallationOutput)
_, vm = startVM()
_, vm = startVM(vmOpts)
fmt.Printf("\nvm.StateDir = %+v\n", vm.StateDir)

vm.EventuallyConnects(1200)
@@ -43,10 +49,13 @@ var _ = Describe("kcrypt encryption", func() {
Expect(err).ToNot(HaveOccurred())

installationOutput, err = vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
Expect(err).ToNot(HaveOccurred(), installationOutput)
if expectedInstallationSuccess {
Expect(err).ToNot(HaveOccurred(), installationOutput)
}
})

AfterEach(func() {
vm.GatherLog("/run/immucore/immucore.log")
err := vm.Destroy(func(vm VM) {
// Stop TPM emulator
tpmPID, err := os.ReadFile(path.Join(vm.StateDir, "tpm", "pid"))
@@ -62,6 +71,63 @@ var _ = Describe("kcrypt encryption", func() {
Expect(err).ToNot(HaveOccurred())
})

When("discovering KMS with mdns", Label("discoverable-kms"), func() {
var tpmHash string
var mdnsHostname string

BeforeEach(func() {
By("creating the secret in kubernetes")
tpmHash = createTPMPassphraseSecret(vm)

mdnsHostname = "discoverable-kms.local"

By("deploying simple-mdns-server vm")
mdnsVM = deploySimpleMDNSServer(mdnsHostname)

config = fmt.Sprintf(`#cloud-config

hostname: metal-{{ trunc 4 .MachineID }}
users:
- name: kairos
passwd: kairos

install:
encrypted_partitions:
- COS_PERSISTENT
grub_options:
extra_cmdline: "rd.neednet=1"
reboot: false # we will reboot manually

kcrypt:
challenger:
mdns: true
challenger_server: "http://%[1]s"
`, mdnsHostname)
})

AfterEach(func() {
cmd := exec.Command("kubectl", "delete", "sealedvolume", tpmHash)
out, err := cmd.CombinedOutput()
Expect(err).ToNot(HaveOccurred(), out)

err = mdnsVM.Destroy(func(vm VM) {})
Expect(err).ToNot(HaveOccurred())
})

It("discovers the KMS using mdns", func() {
Skip("TODO: make this test work")

By("rebooting")
vm.Reboot()
By("checking that we can connect after installation")
vm.EventuallyConnects(1200)
By("checking if we got an encrypted partition")
out, err := vm.Sudo("blkid")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
})
})

// https://kairos.io/docs/advanced/partition_encryption/#offline-mode
When("doing local encryption", Label("local-encryption"), func() {
BeforeEach(func() {
@@ -91,25 +157,9 @@ users:

//https://kairos.io/docs/advanced/partition_encryption/#online-mode
When("using a remote key management server (automated passphrase generation)", Label("remote-auto"), func() {
var tpmHash string
var err error

BeforeEach(func() {
tpmHash, err = vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
Expect(err).ToNot(HaveOccurred(), tpmHash)

kubectlApplyYaml(fmt.Sprintf(`---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: "%[1]s"
namespace: default
spec:
TPMHash: "%[1]s"
partitions:
- label: COS_PERSISTENT
quarantined: false
`, strings.TrimSpace(tpmHash)))

tpmHash = createTPMPassphraseSecret(vm)
config = fmt.Sprintf(`#cloud-config

hostname: metal-{{ trunc 4 .MachineID }}
@@ -212,10 +262,6 @@ install:
kcrypt:
challenger:
challenger_server: "http://%s"
nv_index: ""
c_index: ""
tpm_device: ""

`, os.Getenv("KMS_ADDRESS"))
})

@@ -242,24 +288,15 @@ kcrypt:

When("the key management server is listening on https", func() {
var tpmHash string
var err error

BeforeEach(func() {
tpmHash, err = vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
Expect(err).ToNot(HaveOccurred(), tpmHash)
tpmHash = createTPMPassphraseSecret(vm)
})

kubectlApplyYaml(fmt.Sprintf(`---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: "%[1]s"
namespace: default
spec:
TPMHash: "%[1]s"
partitions:
- label: COS_PERSISTENT
quarantined: false
`, strings.TrimSpace(tpmHash)))
AfterEach(func() {
cmd := exec.Command("kubectl", "delete", "sealedvolume", tpmHash)
out, err := cmd.CombinedOutput()
Expect(err).ToNot(HaveOccurred(), out)
})

When("the certificate is pinned on the configuration", Label("remote-https-pinned"), func() {
@@ -299,6 +336,8 @@ install:

When("the no certificate is set in the configuration", Label("remote-https-bad-cert"), func() {
BeforeEach(func() {
expectedInstallationSuccess = false

config = fmt.Sprintf(`#cloud-config

hostname: metal-{{ trunc 4 .MachineID }}
@@ -316,16 +355,13 @@ install:
kcrypt:
challenger:
challenger_server: "https://%s"
nv_index: ""
c_index: ""
tpm_device: ""
`, os.Getenv("KMS_ADDRESS"))
})

It("fails to talk to the server", func() {
out, err := vm.Sudo("cat manual-install.txt")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).To(MatchRegexp("could not encrypt partition.*x509: certificate signed by unknown authority"))
Expect(out).To(MatchRegexp("failed to verify certificate: x509: certificate signed by unknown authority"))
})
})
})
}
|
||||
|
||||
func createConfigWithCert(server, cert string) client.Config {
|
||||
return client.Config{
|
||||
Kcrypt: struct {
|
||||
Challenger struct {
|
||||
Server string "yaml:\"challenger_server,omitempty\""
|
||||
NVIndex string "yaml:\"nv_index,omitempty\""
|
||||
CIndex string "yaml:\"c_index,omitempty\""
|
||||
TPMDevice string "yaml:\"tpm_device,omitempty\""
|
||||
Certificate string "yaml:\"certificate,omitempty\""
|
||||
}
|
||||
}{
|
||||
Challenger: struct {
|
||||
Server string "yaml:\"challenger_server,omitempty\""
|
||||
NVIndex string "yaml:\"nv_index,omitempty\""
|
||||
CIndex string "yaml:\"c_index,omitempty\""
|
||||
TPMDevice string "yaml:\"tpm_device,omitempty\""
|
||||
Certificate string "yaml:\"certificate,omitempty\""
|
||||
}{
|
||||
Server: server,
|
||||
NVIndex: "",
|
||||
CIndex: "",
|
||||
TPMDevice: "",
|
||||
Certificate: cert,
|
||||
},
|
||||
},
|
||||
}
|
||||
c := client.Config{}
|
||||
c.Kcrypt.Challenger.Server = server
|
||||
c.Kcrypt.Challenger.Certificate = cert
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func createTPMPassphraseSecret(vm VM) string {
|
||||
tpmHash, err := vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
|
||||
Expect(err).ToNot(HaveOccurred(), tpmHash)
|
||||
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%[1]s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%[1]s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false
|
||||
`, strings.TrimSpace(tpmHash)))
|
||||
|
||||
return tpmHash
|
||||
}
|
||||
|
||||
// We run the simple-mdns-server (https://github.com/kairos-io/simple-mdns-server/)
|
||||
// inside a VM next to the one we test. The server advertises the KMS as running on 10.0.2.2
|
||||
// (the host machine). This is a "hack" and is needed because of how the default
|
||||
// networking in qemu works. We need to be within the same network and that
|
||||
// network is only available withing another VM.
|
||||
// https://wiki.qemu.org/Documentation/Networking
|
||||
func deploySimpleMDNSServer(hostname string) VM {
|
||||
opts := DefaultVMOptions()
|
||||
opts.Memory = "2000"
|
||||
opts.CPUS = "1"
|
||||
opts.EmulateTPM = false
|
||||
_, vm := startVM(opts)
|
||||
vm.EventuallyConnects(1200)
|
||||
|
||||
out, err := vm.Sudo(`curl -s https://api.github.com/repos/kairos-io/simple-mdns-server/releases/latest | jq -r .assets[].browser_download_url | grep $(uname -m) | xargs curl -L -o sms.tar.gz`)
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
out, err = vm.Sudo("tar xvf sms.tar.gz")
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
// Start the simple-mdns-server in the background
|
||||
out, err = vm.Sudo(fmt.Sprintf(
|
||||
"/bin/bash -c './simple-mdns-server --port 80 --address 10.0.2.2 --serviceType _kcrypt._tcp --hostName %s &'", hostname))
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
return vm
|
||||
}
|
||||
|
@@ -25,6 +25,53 @@ func TestE2e(t *testing.T) {
RunSpecs(t, "kcrypt-challenger e2e test Suite")
}

type VMOptions struct {
ISO string
User string
Password string
Memory string
CPUS string
RunSpicy bool
UseKVM bool
EmulateTPM bool
}

func DefaultVMOptions() VMOptions {
var err error

memory := os.Getenv("MEMORY")
if memory == "" {
memory = "2096"
}
cpus := os.Getenv("CPUS")
if cpus == "" {
cpus = "2"
}

runSpicy := false
if s := os.Getenv("MACHINE_SPICY"); s != "" {
runSpicy, err = strconv.ParseBool(os.Getenv("MACHINE_SPICY"))
Expect(err).ToNot(HaveOccurred())
}

useKVM := false
if envKVM := os.Getenv("KVM"); envKVM != "" {
useKVM, err = strconv.ParseBool(os.Getenv("KVM"))
Expect(err).ToNot(HaveOccurred())
}

return VMOptions{
ISO: os.Getenv("ISO"),
User: user(),
Password: pass(),
Memory: memory,
CPUS: cpus,
RunSpicy: runSpicy,
UseKVM: useKVM,
EmulateTPM: true,
}
}

func user() string {
user := os.Getenv("SSH_USER")
if user == "" {
@@ -42,8 +89,8 @@ func pass() string {
return pass
}

func startVM() (context.Context, VM) {
if os.Getenv("ISO") == "" {
func startVM(vmOpts VMOptions) (context.Context, VM) {
if vmOpts.ISO == "" {
fmt.Println("ISO missing")
os.Exit(1)
}
@@ -53,29 +100,22 @@ func startVM() (context.Context, VM) {
stateDir, err := os.MkdirTemp("", "")
Expect(err).ToNot(HaveOccurred())

emulateTPM(stateDir)
if vmOpts.EmulateTPM {
emulateTPM(stateDir)
}

sshPort, err := getFreePort()
Expect(err).ToNot(HaveOccurred())

memory := os.Getenv("MEMORY")
if memory == "" {
memory = "2096"
}
cpus := os.Getenv("CPUS")
if cpus == "" {
cpus = "2"
}

opts := []types.MachineOption{
types.QEMUEngine,
types.WithISO(os.Getenv("ISO")),
types.WithMemory(memory),
types.WithCPU(cpus),
types.WithISO(vmOpts.ISO),
types.WithMemory(vmOpts.Memory),
types.WithCPU(vmOpts.CPUS),
types.WithSSHPort(strconv.Itoa(sshPort)),
types.WithID(vmName),
types.WithSSHUser(user()),
types.WithSSHPass(pass()),
types.WithSSHUser(vmOpts.User),
types.WithSSHPass(vmOpts.Password),
types.OnFailure(func(p *process.Process) {
defer GinkgoRecover()

@@ -109,9 +149,12 @@ func startVM() (context.Context, VM) {
types.WithStateDir(stateDir),
// Serial output to file: https://superuser.com/a/1412150
func(m *types.MachineConfig) error {
if vmOpts.EmulateTPM {
m.Args = append(m.Args,
"-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s/swtpm-sock", path.Join(stateDir, "tpm")),
"-tpmdev", "emulator,id=tpm0,chardev=chrtpm", "-device", "tpm-tis,tpmdev=tpm0")
}
m.Args = append(m.Args,
"-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s/swtpm-sock", path.Join(stateDir, "tpm")),
"-tpmdev", "emulator,id=tpm0,chardev=chrtpm", "-device", "tpm-tis,tpmdev=tpm0",
"-chardev", fmt.Sprintf("stdio,mux=on,id=char0,logfile=%s,signal=off", path.Join(stateDir, "serial.log")),
"-serial", "chardev:char0",
"-mon", "chardev=char0",
@@ -123,14 +166,14 @@ func startVM() (context.Context, VM) {
// Set this to true to debug.
// You can connect to it with "spicy" or other tool.
var spicePort int
if os.Getenv("MACHINE_SPICY") != "" {
if vmOpts.RunSpicy {
spicePort, err = getFreePort()
Expect(err).ToNot(HaveOccurred())
fmt.Printf("Spice port = %d\n", spicePort)
opts = append(opts, types.WithDisplay(fmt.Sprintf("-spice port=%d,addr=127.0.0.1,disable-ticketing", spicePort)))
}

if os.Getenv("KVM") != "" {
if vmOpts.UseKVM {
opts = append(opts, func(m *types.MachineConfig) error {
m.Args = append(m.Args,
"-enable-kvm",
@@ -147,7 +190,7 @@ func startVM() (context.Context, VM) {
ctx, err := vm.Start(context.Background())
Expect(err).ToNot(HaveOccurred())

if os.Getenv("MACHINE_SPICY") != "" {
if vmOpts.RunSpicy {
cmd := exec.Command("spicy",
"-h", "127.0.0.1",
"-p", strconv.Itoa(spicePort))