mirror of
https://github.com/kairos-io/kcrypt-challenger.git
synced 2025-09-26 13:04:30 +00:00
Compare commits
229 Commits
v0.2.2
...
2988-remot
Author | SHA1 | Date | |
---|---|---|---|
|
012bfa2fae | ||
|
ecfd56d260 | ||
|
b5a129e21c | ||
|
4553776716 | ||
|
a3e93db05e | ||
|
4c036c5912 | ||
|
448a8b9684 | ||
|
ee6ed01b50 | ||
|
f0cadbbe6e | ||
|
62fb8f6cce | ||
|
329fa9212c | ||
|
b123339d19 | ||
|
2439d24e70 | ||
|
fac5dfb32d | ||
|
5fb15c81f6 | ||
|
caedb1ef7f | ||
|
55a0d62231 | ||
|
592426ae43 | ||
|
118189e672 | ||
|
5f2d857097 | ||
|
6ce6db1d84 | ||
|
89b07027cb | ||
|
bd19b91a1b | ||
|
9eeb285826 | ||
|
dc853ab2a4 | ||
|
8383f4b1b0 | ||
|
eba04e1479 | ||
|
db5793d0d1 | ||
|
8ce8651bca | ||
|
b674f911da | ||
|
2ef72d3c0a | ||
|
f943b01c90 | ||
|
80cd276ff3 | ||
|
db720d392a | ||
|
af5f9b34e6 | ||
|
69bd83e5ba | ||
|
932a59b960 | ||
|
eea31d697d | ||
|
ccd79623ad | ||
|
a4613048c6 | ||
|
ea1f84ed49 | ||
|
afb9d5e70d | ||
|
432c71e4c2 | ||
|
79dae2a87d | ||
|
1f596e0abf | ||
|
53b26c2635 | ||
|
2683ad797b | ||
|
74e82836a5 | ||
|
24d88295d8 | ||
|
17efbabdfc | ||
|
05157abbd4 | ||
|
f1360e172b | ||
|
ac41a4fdcb | ||
|
17cc494985 | ||
|
ddd65746f0 | ||
|
5787d7fa47 | ||
|
2ee88ce704 | ||
|
7d2e6cbed7 | ||
|
fd47042069 | ||
|
c77d4d94f2 | ||
|
d4cbf44973 | ||
|
99f69e38e5 | ||
|
e032586eb4 | ||
|
2aba011ada | ||
|
c1a92786b2 | ||
|
a8e35a94f5 | ||
|
5089f4cc71 | ||
|
a925d877bc | ||
|
a21fb32bc0 | ||
|
f8ef34077d | ||
|
4c2891e33b | ||
|
3bf6a1e612 | ||
|
3e519be076 | ||
|
f8045707ff | ||
|
8194344115 | ||
|
c92402b6c9 | ||
|
8f9d463bc0 | ||
|
33ba761d42 | ||
|
65108068e8 | ||
|
8314c64169 | ||
|
52dfdf3420 | ||
|
09a6ec31ec | ||
|
a33d7872c5 | ||
|
2f0d6d778a | ||
|
2d15026331 | ||
|
0fa24f7679 | ||
|
865c2fc795 | ||
|
42fca7593a | ||
|
4e87807d1f | ||
|
e984eed1c1 | ||
|
4e33127982 | ||
|
6a180b7cde | ||
|
6e2211e4d6 | ||
|
97dcf030cb | ||
|
93596bd189 | ||
|
012329e54b | ||
|
57e911e62a | ||
|
401e3f9735 | ||
|
91edb4eb57 | ||
|
9bdc42fbba | ||
|
d6b79752a3 | ||
|
63795470b1 | ||
|
09e155828c | ||
|
f3ade81dd3 | ||
|
07ce451b60 | ||
|
978d0aa3be | ||
|
615d2013b7 | ||
|
6b8245dc61 | ||
|
df29a61b8b | ||
|
23e4a1dd55 | ||
|
42709484ac | ||
|
97f92cc809 | ||
|
09a93ff001 | ||
|
02b5389fc6 | ||
|
f970ef1899 | ||
|
429b86ea09 | ||
|
5bfbac6892 | ||
|
d9e658b202 | ||
|
204ce64465 | ||
|
17d1414b14 | ||
|
b6c5d331fb | ||
|
a2b28af7b2 | ||
|
dd187adf3a | ||
|
196bcf8500 | ||
|
50441f8e4c | ||
|
a5e73df6e6 | ||
|
904ce9a1b8 | ||
|
2039b57421 | ||
|
c4dcabcabb | ||
|
f757f852dd | ||
|
ef14cef5c4 | ||
|
7205723259 | ||
|
202668005f | ||
|
b572776381 | ||
|
f189719055 | ||
|
5bbc4fd0fb | ||
|
2f2f577db7 | ||
|
d64cab6a7d | ||
|
050d1832dd | ||
|
06552b7777 | ||
|
b8ff5f31dc | ||
|
2f582b3a83 | ||
|
521363de93 | ||
|
7a805f374b | ||
|
06d3d6b1c1 | ||
|
6a337e5812 | ||
|
a4c5c84719 | ||
|
a410398adb | ||
|
a59b3019ed | ||
|
e0138fe609 | ||
|
fe5d338ed5 | ||
|
d708fcfa26 | ||
|
2e63d50125 | ||
|
d4e8b2adc2 | ||
|
10dcecdc85 | ||
|
3c4663afa5 | ||
|
95a352f4b4 | ||
|
fbfd7c9f07 | ||
|
7d84c01663 | ||
|
311b8adda0 | ||
|
bf59ecd475 | ||
|
71e90b94aa | ||
|
3d2d2de9dc | ||
|
c42e66a9de | ||
|
da93e626c5 | ||
|
ecbbe1499e | ||
|
09981d750e | ||
|
b5b4d0d042 | ||
|
8420155746 | ||
|
a9359bf713 | ||
|
b31467e925 | ||
|
c5dc8db56b | ||
|
a80703a556 | ||
|
0b6f771d32 | ||
|
72dd7d3e50 | ||
|
0619047a20 | ||
|
715664969a | ||
|
bcda5b5b38 | ||
|
b2a0330dd8 | ||
|
0b68d90081 | ||
|
40267d4c24 | ||
|
e0ae7a12a4 | ||
|
c2c50877da | ||
|
7a2627fcc8 | ||
|
a0c4462f99 | ||
|
145dd400b1 | ||
|
95dbb0d0be | ||
|
0e56e52cbf | ||
|
3a22134226 | ||
|
7b561efed8 | ||
|
6ff6262459 | ||
|
816013d33d | ||
|
8d0fb0148d | ||
|
ffd5f18bcf | ||
|
3b89def5b4 | ||
|
887d67907b | ||
|
b0a7aa5fdf | ||
|
40875bbae1 | ||
|
7166b14c7e | ||
|
9eb5d9b086 | ||
|
4da6a4f3b0 | ||
|
74fc9c62b4 | ||
|
f3f10b4919 | ||
|
3d4829859b | ||
|
8a17ff714c | ||
|
27114b8db8 | ||
|
1e3efb57cc | ||
|
0c236b6145 | ||
|
d390f77688 | ||
|
266c4f20e9 | ||
|
4c0b40d3a0 | ||
|
08bb62f94e | ||
|
0d3406fa7b | ||
|
1cd4d9a7af | ||
|
d875e54171 | ||
|
2967fb0a6c | ||
|
e9433d2ba7 | ||
|
7abdc7b092 | ||
|
9448ecdd54 | ||
|
d8cd48b411 | ||
|
43d629c974 | ||
|
b00d3af43b | ||
|
7d83e07b05 | ||
|
2fe3f3bc00 | ||
|
791d9dbb8b | ||
|
7dc1e39ac7 | ||
|
076a50b2e9 | ||
|
f8e7a0df87 | ||
|
968ff53267 |
1
.earthlyignore
Normal file
1
.earthlyignore
Normal file
@@ -0,0 +1 @@
|
||||
bin/
|
12
.github/ISSUE_TEMPLATE/file-issues-on-main-kairos-repo.md
vendored
Normal file
12
.github/ISSUE_TEMPLATE/file-issues-on-main-kairos-repo.md
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
name: File issues on main Kairos repo
|
||||
about: Tell users to file their issues on the main Kairos repo
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
:warning: All Kairos issues are tracked in our main repo, please file your issue there, thanks! :warning:
|
||||
|
||||
https://github.com/kairos-io/kairos/issues
|
42
.github/workflows/dependabot_auto.yml
vendored
Normal file
42
.github/workflows/dependabot_auto.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: Dependabot auto-merge
|
||||
on:
|
||||
- pull_request_target
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
packages: read
|
||||
|
||||
jobs:
|
||||
dependabot:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'dependabot[bot]' }}
|
||||
steps:
|
||||
- name: Dependabot metadata
|
||||
id: metadata
|
||||
uses: dependabot/fetch-metadata@v2.4.0
|
||||
with:
|
||||
github-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
skip-commit-verification: true
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Approve a PR if not already approved
|
||||
run: |
|
||||
gh pr checkout "$PR_URL"
|
||||
if [ "$(gh pr status --json reviewDecision -q .currentBranch.reviewDecision)" != "APPROVED" ];
|
||||
then
|
||||
gh pr review --approve "$PR_URL"
|
||||
else
|
||||
echo "PR already approved.";
|
||||
fi
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Enable auto-merge for Dependabot PRs
|
||||
run: gh pr merge --auto --squash "$PR_URL"
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
113
.github/workflows/e2e-tests.yml
vendored
Normal file
113
.github/workflows/e2e-tests.yml
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
name: End to end tests
|
||||
on:
|
||||
push:
|
||||
paths-ignore:
|
||||
- 'README.md'
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'README.md'
|
||||
|
||||
concurrency:
|
||||
group: ci-e2e-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-iso:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v6
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: build iso
|
||||
run: |
|
||||
# Configure earthly to use the docker mirror in CI
|
||||
# https://docs.earthly.dev/ci-integration/pull-through-cache#configuring-earthly-to-use-the-cache
|
||||
mkdir -p ~/.earthly/
|
||||
cat << EOF > ~/.earthly/config.yml
|
||||
global:
|
||||
buildkit_additional_config: |
|
||||
[registry."docker.io"]
|
||||
mirrors = ["registry.docker-mirror.svc.cluster.local:5000"]
|
||||
[registry."registry.docker-mirror.svc.cluster.local:5000"]
|
||||
insecure = true
|
||||
EOF
|
||||
|
||||
earthly -P +iso
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: challenger.iso.zip
|
||||
path: |
|
||||
build/*.iso
|
||||
e2e-tests:
|
||||
needs:
|
||||
- build-iso
|
||||
runs-on: kvm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
# Basic encryption tests
|
||||
- label: "local-encryption"
|
||||
- label: "remote-auto"
|
||||
- label: "remote-static"
|
||||
- label: "remote-https-pinned"
|
||||
- label: "remote-https-bad-cert"
|
||||
- label: "discoverable-kms"
|
||||
# Consolidated remote attestation workflow test
|
||||
- label: "remote-complete-workflow"
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install deps
|
||||
run: |
|
||||
curl -L https://github.com/mudler/luet/releases/download/0.33.0/luet-0.33.0-linux-amd64 -o luet
|
||||
chmod +x luet
|
||||
sudo mv luet /usr/bin/luet
|
||||
sudo mkdir -p /etc/luet/repos.conf.d/
|
||||
sudo luet repo add -y kairos --url quay.io/kairos/packages --type docker
|
||||
LUET_NOLOCK=true sudo -E luet install -y container/kubectl utils/k3d
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v5
|
||||
with:
|
||||
name: challenger.iso.zip
|
||||
- name: Run tests
|
||||
env:
|
||||
LABEL: ${{ matrix.label }}
|
||||
KVM: true
|
||||
run: |
|
||||
sudo apt update && \
|
||||
sudo apt install -y git qemu-system-x86 qemu-utils swtpm jq make glibc-tools \
|
||||
openssl curl gettext ca-certificates curl gnupg lsb-release
|
||||
|
||||
export ISO=$PWD/$(ls *.iso)
|
||||
# update controllers
|
||||
make test
|
||||
# Generate controller image
|
||||
make docker-build
|
||||
# We run with sudo to be able to access /dev/kvm
|
||||
sudo -E ./scripts/e2e-tests.sh
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: ${{ matrix.label }}-test.logs.zip
|
||||
path: tests/**/logs/*
|
||||
if-no-files-found: warn
|
14
.github/workflows/image.yml
vendored
14
.github/workflows/image.yml
vendored
@@ -1,4 +1,3 @@
|
||||
---
|
||||
name: 'build container images'
|
||||
|
||||
on:
|
||||
@@ -8,12 +7,17 @@ on:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ci-image-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Prepare
|
||||
id: prep
|
||||
@@ -46,18 +50,18 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_USERNAME }}
|
||||
password: ${{ secrets.QUAY_PASSWORD }}
|
||||
|
||||
- name: Build
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
builder: ${{ steps.buildx.outputs.name }}
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
|
33
.github/workflows/lint.yml
vendored
Normal file
33
.github/workflows/lint.yml
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
name: Lint
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths:
|
||||
- '**'
|
||||
|
||||
|
||||
concurrency:
|
||||
group: ci-lint-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v6
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Run Lint checks
|
||||
run: |
|
||||
earthly +lint
|
21
.github/workflows/osv-scanner-pr.yaml
vendored
Normal file
21
.github/workflows/osv-scanner-pr.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: OSV-Scanner PR Scan
|
||||
|
||||
# Change "main" to your default branch if you use a different name, i.e. "master"
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
merge_group:
|
||||
branches: [main]
|
||||
|
||||
permissions:
|
||||
# Require writing security events to upload SARIF file to security tab
|
||||
security-events: write
|
||||
# Only need to read contents adn actions
|
||||
contents: read
|
||||
actions: read
|
||||
|
||||
jobs:
|
||||
scan-pr:
|
||||
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v2.2.2"
|
27
.github/workflows/release.yaml
vendored
Normal file
27
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
name: goreleaser
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- run: |
|
||||
git fetch --prune --unshallow
|
||||
- name: Install gcc for arm64
|
||||
run: sudo apt-get update && sudo apt-get install -y build-essential crossbuild-essential-arm64
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
35
.github/workflows/renovate_auto.yml
vendored
Normal file
35
.github/workflows/renovate_auto.yml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Renovate auto-merge
|
||||
on:
|
||||
- pull_request_target
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
packages: read
|
||||
|
||||
jobs:
|
||||
dependabot:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'renovate[bot]' }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Approve a PR if not already approved
|
||||
run: |
|
||||
gh pr checkout "$PR_URL"
|
||||
if [ "$(gh pr status --json reviewDecision -q .currentBranch.reviewDecision)" != "APPROVED" ];
|
||||
then
|
||||
gh pr review --approve "$PR_URL"
|
||||
else
|
||||
echo "PR already approved.";
|
||||
fi
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Enable auto-merge for Renovate PRs
|
||||
run: gh pr merge --auto --squash "$PR_URL"
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
32
.github/workflows/secscan.yaml
vendored
Normal file
32
.github/workflows/secscan.yaml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: "Security Scan"
|
||||
|
||||
# Run workflow each time code is pushed to your repository and on a schedule.
|
||||
# The scheduled workflow runs every at 00:00 on Sunday UTC time.
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
paths:
|
||||
- '**'
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GO111MODULE: on
|
||||
steps:
|
||||
- name: Checkout Source
|
||||
uses: actions/checkout@v5
|
||||
- name: Run Gosec Security Scanner
|
||||
uses: securego/gosec@master
|
||||
with:
|
||||
# we let the report trigger content trigger a failure using the GitHub Security features.
|
||||
args: '-no-fail -fmt sarif -out results.sarif ./...'
|
||||
- name: Upload SARIF file
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
with:
|
||||
# Path to SARIF file relative to the root of the repository
|
||||
sarif_file: results.sarif
|
31
.github/workflows/unit-tests.yml
vendored
31
.github/workflows/unit-tests.yml
vendored
@@ -1,18 +1,35 @@
|
||||
name: Unit tests
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
concurrency:
|
||||
group: ci-unit-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
unit-tests:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ["1.25-bookworm"]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install earthly
|
||||
uses: earthly/actions-setup@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Run tests
|
||||
run: |
|
||||
./earthly.sh +test
|
||||
earthly +test --GO_VERSION=${{ matrix.go-version }}
|
||||
- name: Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
file: ./coverage.out
|
||||
|
3
.gitignore
vendored
3
.gitignore
vendored
@@ -6,6 +6,7 @@
|
||||
*.dylib
|
||||
bin
|
||||
testbin/*
|
||||
manager
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
@@ -24,3 +25,5 @@ testbin/*
|
||||
*~
|
||||
|
||||
/helm-chart
|
||||
build/
|
||||
dist/
|
73
.goreleaser.yaml
Normal file
73
.goreleaser.yaml
Normal file
@@ -0,0 +1,73 @@
|
||||
# Make sure to check the documentation at http://goreleaser.com
|
||||
version: 2
|
||||
project_name: kcrypt-discovery-challenger
|
||||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: default
|
||||
main: ./cmd/discovery/main.go
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- GOEXPERIMENT=boringcrypto
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: fips-amd64
|
||||
main: ./cmd/discovery/main.go
|
||||
hooks:
|
||||
post:
|
||||
- bash -c 'set -e; go version {{.Path}} | grep boringcrypto || (echo "boringcrypto not found" && exit 1)'
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- GOEXPERIMENT=boringcrypto
|
||||
- CC=aarch64-linux-gnu-gcc
|
||||
- CGO_LDFLAGS="-ldl"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- arm64
|
||||
binary: '{{ .ProjectName }}'
|
||||
id: fips-arm64
|
||||
main: ./cmd/discovery/main.go
|
||||
hooks:
|
||||
post:
|
||||
- bash -c 'set -e; go version {{.Path}} | grep boringcrypto || (echo "boringcrypto not found" && exit 1)'
|
||||
source:
|
||||
enabled: true
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
|
||||
archives:
|
||||
- id: default-archive
|
||||
ids:
|
||||
- default
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}-{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
|
||||
- id: fips-archive
|
||||
ids:
|
||||
- fips-arm64
|
||||
- fips-amd64
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}-{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}-fips'
|
||||
checksum:
|
||||
name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ .Tag }}-next"
|
||||
changelog:
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- '^docs:'
|
||||
- '^test:'
|
||||
- '^Merge pull request'
|
||||
env:
|
||||
- GOSUMDB=sum.golang.org
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
21
.yamllint
Normal file
21
.yamllint
Normal file
@@ -0,0 +1,21 @@
|
||||
extends: default
|
||||
|
||||
rules:
|
||||
# 80 chars should be enough, but don't fail if a line is longer
|
||||
line-length:
|
||||
max: 150
|
||||
level: warning
|
||||
|
||||
# accept both key:
|
||||
# - item
|
||||
#
|
||||
# and key:
|
||||
# - item
|
||||
indentation:
|
||||
indent-sequences: whatever
|
||||
|
||||
truthy:
|
||||
check-keys: false
|
||||
|
||||
document-start:
|
||||
present: false
|
@@ -1,5 +1,5 @@
|
||||
# Build the manager binary
|
||||
FROM golang:1.18 as builder
|
||||
FROM golang:1.25 as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
@@ -16,7 +16,7 @@ COPY pkg/ pkg/
|
||||
COPY controllers/ controllers/
|
||||
|
||||
# Build
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
|
||||
RUN CGO_ENABLED=0 go build -a -o manager main.go
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
|
111
Earthfile
111
Earthfile
@@ -1,14 +1,20 @@
|
||||
VERSION 0.6
|
||||
ARG BASE_IMAGE=quay.io/kairos/core-opensuse:latest
|
||||
|
||||
# renovate: datasource=github-releases depName=kairos-io/kairos
|
||||
ARG KAIROS_VERSION="v2.5.0"
|
||||
ARG BASE_IMAGE=quay.io/kairos/ubuntu:23.10-core-amd64-generic-$KAIROS_VERSION
|
||||
|
||||
ARG OSBUILDER_IMAGE=quay.io/kairos/osbuilder-tools
|
||||
ARG GO_VERSION=1.18
|
||||
# renovate: datasource=docker depName=golang
|
||||
ARG GO_VERSION=1.25-bookworm
|
||||
ARG LUET_VERSION=0.33.0
|
||||
|
||||
build-challenger:
|
||||
FROM golang:alpine
|
||||
FROM +go-deps
|
||||
COPY . /work
|
||||
WORKDIR /work
|
||||
RUN CGO_ENABLED=0 go build -o kcrypt-discovery-challenger ./cmd/discovery
|
||||
SAVE ARTIFACT /work/kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
|
||||
SAVE ARTIFACT /work/kcrypt-discovery-challenger kcrypt-discovery-challenger AS LOCAL kcrypt-discovery-challenger
|
||||
|
||||
image:
|
||||
FROM $BASE_IMAGE
|
||||
@@ -16,37 +22,94 @@ image:
|
||||
COPY +build-challenger/kcrypt-discovery-challenger /system/discovery/kcrypt-discovery-challenger
|
||||
SAVE IMAGE $IMAGE
|
||||
|
||||
image-rootfs:
|
||||
FROM +image
|
||||
SAVE ARTIFACT --keep-own /. rootfs
|
||||
|
||||
iso:
|
||||
ARG OSBUILDER_IMAGE
|
||||
ARG ISO_NAME=challenger
|
||||
FROM $OSBUILDER_IMAGE
|
||||
RUN zypper in -y jq docker
|
||||
WORKDIR /build
|
||||
WITH DOCKER --allow-privileged --load $IMAGE=(+image --IMAGE=test)
|
||||
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --date=false --local test --output /build/
|
||||
END
|
||||
# See: https://github.com/rancher/elemental-cli/issues/228
|
||||
RUN sha256sum $ISO_NAME.iso > $ISO_NAME.iso.sha256
|
||||
COPY --keep-own +image-rootfs/rootfs /build/rootfs
|
||||
RUN /entrypoint.sh --name $ISO_NAME --debug build-iso --squash-no-compression --date=false --output /build/ dir:/build/rootfs
|
||||
SAVE ARTIFACT /build/$ISO_NAME.iso kairos.iso AS LOCAL build/$ISO_NAME.iso
|
||||
SAVE ARTIFACT /build/$ISO_NAME.iso.sha256 kairos.iso.sha256 AS LOCAL build/$ISO_NAME.iso.sha256
|
||||
|
||||
test:
|
||||
go-deps:
|
||||
ARG GO_VERSION
|
||||
FROM golang:$GO_VERSION
|
||||
ENV CGO_ENABLED=0
|
||||
WORKDIR /build
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
RUN go mod verify
|
||||
SAVE ARTIFACT go.mod AS LOCAL go.mod
|
||||
SAVE ARTIFACT go.sum AS LOCAL go.sum
|
||||
|
||||
test:
|
||||
FROM +go-deps
|
||||
ENV CGO_ENABLED=0
|
||||
WORKDIR /work
|
||||
|
||||
# Cache layer for modules
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download && go mod verify
|
||||
|
||||
RUN go get github.com/onsi/gomega/...
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.1.4
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.1.4
|
||||
RUN go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.1.4
|
||||
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
COPY . /work
|
||||
RUN PATH=$PATH:$GOPATH/bin ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
|
||||
COPY . .
|
||||
RUN go run github.com/onsi/ginkgo/v2/ginkgo run --covermode=atomic --coverprofile=coverage.out -p -r pkg/challenger cmd/discovery/client
|
||||
SAVE ARTIFACT coverage.out AS LOCAL coverage.out
|
||||
|
||||
# Generic targets
|
||||
# usage e.g. ./earthly.sh +datasource-iso --CLOUD_CONFIG=tests/assets/qrcode.yaml
|
||||
datasource-iso:
|
||||
ARG OSBUILDER_IMAGE
|
||||
ARG CLOUD_CONFIG
|
||||
FROM $OSBUILDER_IMAGE
|
||||
RUN zypper in -y mkisofs
|
||||
WORKDIR /build
|
||||
RUN touch meta-data
|
||||
|
||||
COPY ${CLOUD_CONFIG} user-data
|
||||
RUN cat user-data
|
||||
RUN mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
|
||||
SAVE ARTIFACT /build/ci.iso iso.iso AS LOCAL build/datasource.iso
|
||||
|
||||
luet:
|
||||
FROM quay.io/luet/base:$LUET_VERSION
|
||||
SAVE ARTIFACT /usr/bin/luet /luet
|
||||
|
||||
e2e-tests-image:
|
||||
FROM opensuse/tumbleweed
|
||||
RUN zypper in -y go1.23 git qemu-x86 qemu-arm qemu-tools swtpm docker jq docker-compose make glibc libopenssl-devel curl gettext-runtime awk envsubst
|
||||
ENV GOPATH="/go"
|
||||
|
||||
COPY . /test
|
||||
WORKDIR /test
|
||||
|
||||
IF [ -e /test/build/kairos.iso ]
|
||||
ENV ISO=/test/build/kairos.iso
|
||||
ELSE
|
||||
COPY +iso/kairos.iso kairos.iso
|
||||
ENV ISO=/test/kairos.iso
|
||||
END
|
||||
|
||||
COPY +luet/luet /usr/bin/luet
|
||||
RUN mkdir -p /etc/luet/repos.conf.d/
|
||||
RUN luet repo add -y kairos --url quay.io/kairos/packages --type docker
|
||||
RUN LUET_NOLOCK=true luet install -y container/kubectl utils/k3d
|
||||
|
||||
controller-latest:
|
||||
FROM DOCKERFILE .
|
||||
SAVE IMAGE controller:latest
|
||||
|
||||
e2e-tests:
|
||||
FROM +e2e-tests-image
|
||||
ARG LABEL
|
||||
RUN make test # This also generates the latest controllers automatically, we do that before building the docker image with them
|
||||
WITH DOCKER --allow-privileged --load controller:latest=+controller-latest
|
||||
RUN ./scripts/e2e-tests.sh
|
||||
END
|
||||
|
||||
lint:
|
||||
BUILD +yamllint
|
||||
|
||||
yamllint:
|
||||
FROM cytopia/yamllint
|
||||
COPY . .
|
||||
RUN yamllint .github/workflows/
|
||||
|
9
Makefile
9
Makefile
@@ -103,7 +103,7 @@ vet: ## Run go vet against code.
|
||||
|
||||
.PHONY: test
|
||||
test: manifests generate fmt vet envtest ## Run tests.
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./pkg/...
|
||||
|
||||
##@ Build
|
||||
|
||||
@@ -160,7 +160,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest
|
||||
|
||||
## Tool Versions
|
||||
KUSTOMIZE_VERSION ?= v3.8.7
|
||||
CONTROLLER_TOOLS_VERSION ?= v0.9.2
|
||||
CONTROLLER_TOOLS_VERSION ?= v0.16.0
|
||||
|
||||
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
|
||||
.PHONY: kustomize
|
||||
@@ -171,7 +171,8 @@ $(KUSTOMIZE): $(LOCALBIN)
|
||||
.PHONY: controller-gen
|
||||
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
|
||||
$(CONTROLLER_GEN): $(LOCALBIN)
|
||||
test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
|
||||
test -s $(LOCALBIN)/controller-gen || curl -L -v -Sso $(LOCALBIN)/controller-gen https://github.com/kubernetes-sigs/controller-tools/releases/download/$(CONTROLLER_TOOLS_VERSION)/controller-gen-linux-amd64
|
||||
chmod +x $(LOCALBIN)/controller-gen
|
||||
|
||||
.PHONY: envtest
|
||||
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
|
||||
@@ -257,4 +258,4 @@ undeploy-dev: ## Undeploy controller from the K8s cluster specified in ~/.kube/c
|
||||
kubesplit: manifests kustomize
|
||||
rm -rf helm-chart
|
||||
mkdir helm-chart
|
||||
$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
|
||||
$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
|
||||
|
489
README.md
489
README.md
@@ -1,9 +1,98 @@
|
||||
# kcrypt-challenger
|
||||
<h1 align="center">
|
||||
<br>
|
||||
<img width="184" alt="kairos-white-column 5bc2fe34" src="https://user-images.githubusercontent.com/2420543/193010398-72d4ba6e-7efe-4c2e-b7ba-d3a826a55b7d.png"><br>
|
||||
Kcrypt challenger
|
||||
<br>
|
||||
</h1>
|
||||
|
||||
<h3 align="center">Kcrypt TPM challenger</h3>
|
||||
<p align="center">
|
||||
<a href="https://opensource.org/licenses/">
|
||||
<img src="https://img.shields.io/badge/licence-APL2-brightgreen"
|
||||
alt="license">
|
||||
</a>
|
||||
<a href="https://github.com/kairos-io/kcrypt-challenger/issues"><img src="https://img.shields.io/github/issues/kairos-io/kcrypt-challenger"></a>
|
||||
<a href="https://kairos.io/docs/" target=_blank> <img src="https://img.shields.io/badge/Documentation-blue"
|
||||
alt="docs"></a>
|
||||
<img src="https://img.shields.io/badge/made%20with-Go-blue">
|
||||
<img src="https://goreportcard.com/badge/github.com/kairos-io/kcrypt-challenger" alt="go report card" />
|
||||
<a href="https://github.com/kairos-io/kcrypt-challenger/actions/workflows/e2e-tests.yml?query=branch%3Amain"> <img src="https://github.com/kairos-io/kcrypt-challenger/actions/workflows/e2e-tests.yml/badge.svg?branch=main"></a>
|
||||
</p>
|
||||
|
||||
|
||||
With Kairos you can build immutable, bootable Kubernetes and OS images for your edge devices as easily as writing a Dockerfile. Optional P2P mesh with distributed ledger automates node bootstrapping and coordination. Updating nodes is as easy as CI/CD: push a new image to your container registry and let secure, risk-free A/B atomic upgrades do the rest.
|
||||
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th align="center">
|
||||
<img width="640" height="1px">
|
||||
<p>
|
||||
<small>
|
||||
Documentation
|
||||
</small>
|
||||
</p>
|
||||
</th>
|
||||
<th align="center">
|
||||
<img width="640" height="1">
|
||||
<p>
|
||||
<small>
|
||||
Contribute
|
||||
</small>
|
||||
</p>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
📚 [Getting started with Kairos](https://kairos.io/docs/getting-started) <br> :bulb: [Examples](https://kairos.io/docs/examples) <br> :movie_camera: [Video](https://kairos.io/docs/media/) <br> :open_hands:[Engage with the Community](https://kairos.io/community/)
|
||||
|
||||
</td>
|
||||
<td>
|
||||
|
||||
🙌[ CONTRIBUTING.md ]( https://github.com/kairos-io/kairos/blob/master/CONTRIBUTING.md ) <br> :raising_hand: [ GOVERNANCE ]( https://github.com/kairos-io/kairos/blob/master/GOVERNANCE.md ) <br>:construction_worker:[Code of conduct](https://github.com/kairos-io/kairos/blob/master/CODE_OF_CONDUCT.md)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
| :exclamation: | This is experimental! |
|
||||
|-|:-|
|
||||
|
||||
This is the Kairos kcrypt-challenger Kubernetes Native Extension.
|
||||
This is the Kairos kcrypt-challenger Kubernetes Native Extension.
|
||||
|
||||
## Usage
|
||||
|
||||
See the documentation in our website: https://kairos.io/docs/advanced/partition_encryption/.
|
||||
|
||||
### TPM NV Memory Cleanup
|
||||
|
||||
⚠️ **DANGER**: This command removes encryption passphrases from TPM memory!
|
||||
⚠️ **If you delete the wrong index, your encrypted disk may become UNBOOTABLE!**
|
||||
|
||||
During development and testing, the kcrypt-challenger may store passphrases in TPM non-volatile (NV) memory. These passphrases persist across reboots and can accumulate over time, taking up space in the TPM.
|
||||
|
||||
To clean up TPM NV memory used by the challenger:
|
||||
|
||||
```bash
|
||||
# Clean up the default NV index (respects config or defaults to 0x1500000)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Clean up a specific NV index
|
||||
kcrypt-discovery-challenger cleanup --nv-index=0x1500001
|
||||
|
||||
# Clean up with specific TPM device
|
||||
kcrypt-discovery-challenger cleanup --tpm-device=/dev/tpmrm0
|
||||
```
|
||||
|
||||
**Safety Features:**
|
||||
- By default, the command shows warnings and prompts for confirmation
|
||||
- You must type "yes" to proceed with deletion
|
||||
- Use `--i-know-what-i-am-doing` flag to skip the prompt (not recommended)
|
||||
|
||||
**Note**: This command uses native Go TPM libraries and requires appropriate permissions to access the TPM device.
|
||||
|
||||
## Installation
|
||||
|
||||
To install, use helm:
|
||||
|
||||
@@ -11,7 +100,7 @@ To install, use helm:
|
||||
# Adds the kairos repo to helm
|
||||
$ helm repo add kairos https://kairos-io.github.io/helm-charts
|
||||
"kairos" has been added to your repositories
|
||||
$ helm repo update
|
||||
$ helm repo update
|
||||
Hang tight while we grab the latest from your chart repositories...
|
||||
...Successfully got an update from the "kairos" chart repository
|
||||
Update Complete. ⎈Happy Helming!⎈
|
||||
@@ -28,3 +117,397 @@ TEST SUITE: None
|
||||
# Installs challenger
|
||||
$ helm install kairos-challenger kairos/kcrypt-challenger
|
||||
```
|
||||
|
||||
## Remote Attestation Flow
|
||||
|
||||
The kcrypt-challenger implements a secure TPM-based remote attestation flow for disk encryption key management. The following diagram illustrates the complete attestation process:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant TPM as TPM Hardware
|
||||
participant Client as TPM Client<br/>(Kairos Node)
|
||||
participant Challenger as Kcrypt Challenger<br/>(Server)
|
||||
participant K8s as Kubernetes API<br/>(SealedVolume/Secret)
|
||||
|
||||
Note over TPM,Client: Client Boot Process
|
||||
Client->>TPM: Extract EK (Endorsement Key)
|
||||
Client->>TPM: Generate AK (Attestation Key)
|
||||
Client->>TPM: Read PCR Values (Boot State)
|
||||
|
||||
Note over Client,Challenger: 1. Connection Establishment
|
||||
Client->>Challenger: WebSocket connection with partition info<br/>(label, device, UUID)
|
||||
Challenger->>Client: Connection established
|
||||
|
||||
Note over Client,Challenger: 2. TPM Authentication (Challenge-Response)
|
||||
Client->>Challenger: Send EK + AK attestation data
|
||||
Challenger->>Challenger: Decode EK/AK, compute TPM hash
|
||||
Challenger->>Challenger: Generate cryptographic challenge
|
||||
Challenger->>Client: Send challenge (encrypted with EK)
|
||||
Client->>TPM: Decrypt challenge using private EK
|
||||
Client->>TPM: Sign response using private AK
|
||||
Client->>Challenger: Send proof response + PCR quote
|
||||
Challenger->>Challenger: Verify challenge response
|
||||
|
||||
Note over Challenger,K8s: 3. Enrollment Context Determination
|
||||
Challenger->>K8s: List SealedVolumes by TPM hash
|
||||
K8s->>Challenger: Return existing volumes (if any)
|
||||
|
||||
alt New Enrollment (TOFU - Trust On First Use)
|
||||
Note over Challenger,K8s: 4a. Initial TOFU Enrollment
|
||||
Challenger->>Challenger: Skip attestation verification (TOFU)
|
||||
Challenger->>Challenger: Generate secure passphrase
|
||||
Challenger->>K8s: Create/reuse Kubernetes Secret
|
||||
Challenger->>Challenger: Create attestation spec (store ALL PCRs)
|
||||
Challenger->>K8s: Create SealedVolume with attestation data
|
||||
K8s->>Challenger: Confirm resource creation
|
||||
else Existing Enrollment
|
||||
Note over Challenger,K8s: 4b. Selective Verification & Re-enrollment
|
||||
Challenger->>Challenger: Check if TPM is quarantined
|
||||
alt TPM Quarantined
|
||||
Challenger->>Client: Security rejection (access denied)
|
||||
else TPM Not Quarantined
|
||||
Note over Challenger: Selective Attestation Verification
|
||||
Challenger->>Challenger: Verify AK using selective enrollment:<br/>• Empty AK = re-enrollment mode (accept any)<br/>• Set AK = enforcement mode (exact match)
|
||||
Challenger->>Challenger: Verify PCRs using selective enrollment:<br/>• Empty PCR = re-enrollment mode (accept + update)<br/>• Set PCR = enforcement mode (exact match)<br/>• Omitted PCR = skip verification entirely
|
||||
alt Verification Failed
|
||||
Challenger->>Client: Security rejection (attestation failed)
|
||||
else Verification Passed
|
||||
Challenger->>Challenger: Update empty fields with current values
|
||||
Challenger->>K8s: Update SealedVolume (if changes made)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
Note over Challenger,K8s: 5. Passphrase Retrieval & Delivery
|
||||
Challenger->>K8s: Get Kubernetes Secret by name/path
|
||||
K8s->>Challenger: Return encrypted passphrase
|
||||
Challenger->>Client: Send passphrase securely
|
||||
|
||||
Note over TPM,Client: 6. Disk Decryption
|
||||
Client->>Client: Use passphrase to decrypt disk partition
|
||||
Client->>Challenger: Close WebSocket connection
|
||||
|
||||
Note over TPM,Client: Success - Node continues boot process
|
||||
```
|
||||
|
||||
### Flow Explanation
|
||||
|
||||
1. **Connection Establishment**: Client establishes WebSocket connection with partition metadata
|
||||
2. **TPM Authentication**: Cryptographic challenge-response proves client controls the TPM hardware
|
||||
3. **Enrollment Determination**: Server checks if this TPM is already enrolled
|
||||
4. **Security Verification**:
|
||||
- **TOFU**: New TPMs are automatically enrolled (Trust On First Use)
|
||||
- **Selective Enrollment**: Existing TPMs undergo flexible verification based on field states
|
||||
5. **Passphrase Delivery**: Encrypted disk passphrase is securely delivered to authenticated client
|
||||
|
||||
### Selective Enrollment States
|
||||
|
||||
| Field State | Verification | Updates | Use Case |
|
||||
|-------------|-------------|---------|----------|
|
||||
| **Empty** (`""`) | ✅ Accept any value | ✅ Update with current | Re-learn after TPM/firmware changes |
|
||||
| **Set** (`"abc123"`) | ✅ Enforce exact match | ❌ No updates | Strict security enforcement |
|
||||
| **Omitted** (deleted) | ❌ Skip entirely | ❌ Never re-enrolled | Ignore volatile PCRs (e.g., PCR 11) |
|
||||
|
||||
## Selective Enrollment Mode for TPM Attestation
|
||||
|
||||
The kcrypt-challenger implements a sophisticated "selective enrollment mode" that solves operational challenges in real-world TPM-based disk encryption deployments. This feature provides flexible attestation management while maintaining strong security guarantees.
|
||||
|
||||
### Key Features
|
||||
|
||||
- Full selective enrollment with three field states (empty, set, omitted)
|
||||
- Trust On First Use (TOFU) automatic enrollment
|
||||
- Secret reuse after SealedVolume recreation
|
||||
- PCR re-enrollment for kernel upgrades
|
||||
- PCR omission for volatile boot stages
|
||||
- Early quarantine checking with fail-fast behavior
|
||||
|
||||
### How Selective Enrollment Works
|
||||
|
||||
The system supports two distinct enrollment behaviors:
|
||||
|
||||
#### **Initial TOFU Enrollment** (No SealedVolume exists)
|
||||
- **Store ALL PCRs** provided by the client (don't omit any)
|
||||
- Create complete attestation baseline from first contact
|
||||
- Enables full security verification for subsequent attestations
|
||||
|
||||
#### **Selective Re-enrollment** (SealedVolume exists with specific fields)
|
||||
- **Empty values** (`""`) = Accept any value, update the stored value (re-enrollment mode)
|
||||
- **Set values** (`"abc123..."`) = Enforce exact match (enforcement mode)
|
||||
- **Omitted fields** = Skip verification entirely (ignored mode)
|
||||
|
||||
**Selective Enrollment Behavior Summary:**
|
||||
|
||||
| Field State | Verification | Updates | Use Case |
|
||||
|-------------|-------------|---------|----------|
|
||||
| **Empty** (`""`) | ✅ Accept any value | ✅ Update with current | Re-learn after TPM/firmware changes |
|
||||
| **Set** (`"abc123"`) | ✅ Enforce exact match | ❌ No updates | Strict security enforcement |
|
||||
| **Omitted** (deleted) | ❌ Skip entirely | ❌ Never re-enrolled | Ignore volatile PCRs (e.g., PCR 11) |
|
||||
|
||||
### SealedVolume API Examples
|
||||
|
||||
#### **Example 1: Initial TOFU Enrollment**
|
||||
When no SealedVolume exists, the server automatically creates one with ALL received PCRs:
|
||||
|
||||
```yaml
|
||||
# Server creates this automatically during TOFU enrollment
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
spec:
|
||||
TPMHash: "computed-from-client"
|
||||
attestation:
|
||||
ekPublicKey: "learned-ek" # Learned from client
|
||||
akPublicKey: "learned-ak" # Learned from client
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "abc123..." # All received PCRs stored
|
||||
"7": "def456..."
|
||||
"11": "ghi789..." # Including PCR 11 if provided
|
||||
```
|
||||
|
||||
#### **Example 2: Selective Re-enrollment Control**
|
||||
Operators can control which fields allow re-enrollment:
|
||||
|
||||
```yaml
|
||||
# Operator-controlled selective enforcement
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
spec:
|
||||
TPMHash: "required-tpm-hash" # MUST be set for client matching
|
||||
attestation:
|
||||
ekPublicKey: "" # Empty = re-enrollment mode
|
||||
akPublicKey: "fixed-ak" # Set = enforce this value
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "" # Empty = re-enrollment mode
|
||||
"7": "fixed-value" # Set = enforce this value
|
||||
# "11": omitted # Omitted = skip entirely
|
||||
```
|
||||
|
||||
### Use Cases Solved
|
||||
|
||||
1. **Pure TOFU**: No SealedVolume exists → System learns ALL attestation data from first contact
|
||||
2. **Static Passphrase Tests**: Create Secret + SealedVolume with TPM hash, let TOFU handle attestation data
|
||||
3. **Production Manual Setup**: Operators set known passphrases + TPM hashes, system learns remaining security data
|
||||
4. **Firmware Upgrades**: Set PCR 0 to empty to re-learn after BIOS updates
|
||||
5. **TPM Replacement**: Set AK/EK fields to empty to re-learn after hardware changes
|
||||
6. **Flexible Boot Stages**: Omit PCR 11 entirely so users can decrypt during boot AND after full system startup
|
||||
7. **Kernel Updates**: Omit PCR 11 to avoid quarantine on routine Kairos upgrades
|
||||
|
||||
### Practical Operator Workflows
|
||||
|
||||
#### **Scenario 1: Reusing Existing Passphrases After SealedVolume Recreation**
|
||||
|
||||
**Problem**: An operator needs to recreate a SealedVolume (e.g., after accidental deletion or configuration changes) but wants to keep using the existing passphrase to avoid re-encrypting the disk.
|
||||
|
||||
**Solution**: The system automatically reuses existing Kubernetes secrets when available:
|
||||
|
||||
```bash
|
||||
# 1. Operator accidentally deletes SealedVolume
|
||||
kubectl delete sealedvolume my-encrypted-volume
|
||||
|
||||
# 2. Original secret still exists in cluster
|
||||
kubectl get secret my-encrypted-volume-encrypted-data
|
||||
# NAME TYPE DATA AGE
|
||||
# my-encrypted-volume-encrypted-data Opaque 1 5d
|
||||
|
||||
# 3. When TPM client reconnects, system detects existing secret
|
||||
# and reuses the passphrase instead of generating a new one
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Detect the existing secret with the same name
|
||||
- Log: "Secret already exists, reusing existing secret"
|
||||
- Use the existing passphrase for decryption
|
||||
- Recreate the SealedVolume with current TPM attestation data
|
||||
- Maintain continuity without requiring disk re-encryption
|
||||
|
||||
#### **Scenario 2: Deliberately Skipping PCRs After Initial Enrollment**
|
||||
|
||||
**Problem**: An operator initially enrolls with PCRs 0, 7, and 11, but later realizes PCR 11 changes frequently due to kernel updates and wants to ignore it permanently.
|
||||
|
||||
**Solution**: Remove the PCR from the SealedVolume specification:
|
||||
|
||||
```bash
|
||||
# 1. Initial enrollment created SealedVolume with:
|
||||
# pcrValues:
|
||||
# pcrs:
|
||||
# "0": "abc123..."
|
||||
# "7": "def456..."
|
||||
# "11": "ghi789..."
|
||||
|
||||
# 2. Operator edits SealedVolume to remove PCR 11 entirely
|
||||
kubectl edit sealedvolume my-encrypted-volume
|
||||
# Remove the "11": "ghi789..." line completely
|
||||
|
||||
# 3. Result - omitted PCR 11:
|
||||
# pcrValues:
|
||||
# pcrs:
|
||||
# "0": "abc123..."
|
||||
# "7": "def456..."
|
||||
# # PCR 11 omitted = ignored entirely
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Skip PCR 11 verification entirely (no enforcement)
|
||||
- Never re-enroll PCR 11 in future attestations
|
||||
- Log: "PCR verification successful using selective enrollment" (without mentioning PCR 11)
|
||||
- Continue enforcing PCRs 0 and 7 normally
|
||||
|
||||
#### **Scenario 3: Manual PCR Selection During Initial Setup**
|
||||
|
||||
**Problem**: An operator knows certain PCRs will be unstable and wants to exclude them from the beginning.
|
||||
|
||||
**Solution**: Create the initial SealedVolume manually with only desired PCRs:
|
||||
|
||||
```yaml
|
||||
# Create SealedVolume with selective PCR enforcement from the start
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: selective-pcr-volume
|
||||
spec:
|
||||
TPMHash: "known-tpm-hash"
|
||||
partitions:
|
||||
- label: "encrypted-data"
|
||||
secret:
|
||||
name: "my-passphrase"
|
||||
path: "passphrase"
|
||||
attestation:
|
||||
ekPublicKey: "" # Re-enrollment mode
|
||||
akPublicKey: "" # Re-enrollment mode
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "" # Re-enrollment mode (will learn)
|
||||
"7": "" # Re-enrollment mode (will learn)
|
||||
# "11": omitted # Skip PCR 11 entirely
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Learn and enforce PCRs 0 and 7 on first attestation
|
||||
- Completely ignore PCR 11 (never verify, never store)
|
||||
- Allow flexible boot stages without PCR 11 interference
|
||||
|
||||
#### **Scenario 4: Kernel Upgrade - Temporary PCR Re-enrollment**
|
||||
|
||||
**Problem**: An operator is performing a kernel upgrade and knows PCR 11 will change, but wants to continue enforcing it after the upgrade (unlike permanent omission).
|
||||
|
||||
**Solution**: Set the PCR value to empty string to trigger re-enrollment mode:
|
||||
|
||||
```bash
|
||||
# 1. Before kernel upgrade - PCR 11 is currently enforced
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs.11}'
|
||||
# Output: "abc123def456..." (current PCR 11 value)
|
||||
|
||||
# 2. Set PCR 11 to empty string to allow re-enrollment
|
||||
kubectl patch sealedvolume my-volume --type='merge' \
|
||||
-p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"11":""}}}}}'
|
||||
|
||||
# 3. Perform kernel upgrade and reboot
|
||||
|
||||
# 4. After reboot, TPM client reconnects and system learns new PCR 11 value
|
||||
# Log will show: "Updated PCR value during selective enrollment, pcr: 11"
|
||||
|
||||
# 5. Verify new PCR 11 value is now enforced
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs.11}'
|
||||
# Output: "new789xyz012..." (new PCR 11 value after kernel upgrade)
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Accept any PCR 11 value on next attestation (re-enrollment mode)
|
||||
- Update the stored PCR 11 with the new post-upgrade value
|
||||
- Resume strict PCR 11 enforcement with the new value
|
||||
- Log: "Updated PCR value during selective enrollment"
|
||||
|
||||
**Key Difference from Scenario 2:**
|
||||
- **Scenario 2 (Omit PCR)**: PCR 11 permanently ignored, never verified again
|
||||
- **Scenario 4 (Empty PCR)**: PCR 11 temporarily re-enrolled, then enforced with new value
|
||||
|
||||
### Security Architecture
|
||||
|
||||
- **TPM Hash is mandatory** - prevents multiple clients from matching the same SealedVolume
|
||||
- **EK verification remains strict** - only AK and PCRs support selective enrollment modes
|
||||
- **Early quarantine checking** - quarantined TPMs are rejected immediately after authentication
|
||||
- **Comprehensive logging** - all enrollment events are logged for audit trails
|
||||
- **Challenge-response authentication** - prevents TPM impersonation attacks
|
||||
|
||||
### Quick Reference for Documentation
|
||||
|
||||
**Common Operations:**
|
||||
|
||||
```bash
|
||||
# Skip a PCR permanently (never verify again)
|
||||
kubectl edit sealedvolume my-volume
|
||||
# Remove the PCR line entirely from pcrValues.pcrs
|
||||
|
||||
# Temporarily allow PCR re-enrollment (e.g., before kernel upgrade)
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"11":""}}}}}'
|
||||
|
||||
# Re-learn a PCR after hardware change (e.g., PCR 0 after BIOS update)
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"0":""}}}}}'
|
||||
|
||||
# Re-learn AK after TPM replacement
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"akPublicKey":""}}}'
|
||||
|
||||
# Check current PCR enforcement status
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs}' | jq .
|
||||
```
|
||||
|
||||
**Log Messages to Expect:**
|
||||
|
||||
- `"Secret already exists, reusing existing secret"` - Passphrase reuse scenario
|
||||
- `"Updated PCR value during selective enrollment"` - Re-enrollment mode active
|
||||
- `"PCR verification successful using selective enrollment"` - Omitted PCRs ignored
|
||||
- `"PCR enforcement mode verification passed"` - Strict enforcement active
|
||||
|
||||
## ✅ E2E Testing Coverage for Selective Enrollment
|
||||
|
||||
### Status: ✅ COMPLETED
|
||||
Comprehensive E2E test suite has been implemented covering all selective enrollment scenarios. The test suite is optimized for efficiency using VM reuse patterns to minimize execution time while maintaining thorough coverage.
|
||||
|
||||
### ✅ Implemented E2E Test Scenarios
|
||||
|
||||
#### **Comprehensive Remote Attestation Workflow**
|
||||
- [x] **Complete E2E Test Suite**: All remote attestation scenarios consolidated into a single comprehensive test (`remote-complete-workflow`)
|
||||
- TOFU enrollment, quarantine management, PCR management, AK management
|
||||
- Secret reuse, error handling, multi-partition support
|
||||
- Performance testing, security verification, and operational workflows
|
||||
|
||||
#### **9. Logging & Observability**
|
||||
- [x] **Audit Trail Verification**: Security events logging validation (integrated across all tests)
|
||||
- [x] **Log Message Accuracy**: Expected log messages verification (integrated across all tests)
|
||||
- [x] **Metrics Collection**: Performance monitoring during tests (integrated across all tests)
|
||||
|
||||
#### **10. Compatibility Testing**
|
||||
- [x] **TPM 2.0 Compatibility**: Software TPM emulation with TPM 2.0 (all tests use `swtpm`)
|
||||
- [x] **Kernel Variations**: PCR behavior testing across different scenarios (`remote-large-pcr`)
|
||||
- [x] **Hardware Variations**: TPM emulation covering different chip behaviors (via `swtpm`)
|
||||
|
||||
### Test Implementation Details
|
||||
|
||||
The comprehensive test suite includes:
|
||||
|
||||
- **18 Test Labels**: Covering all scenarios from basic to advanced
|
||||
- **3 Test Files**: Organized by complexity and VM reuse optimization
|
||||
- **VM Reuse Pattern**: Reduces test time from ~40 minutes to ~20 minutes
|
||||
- **Real TPM Emulation**: Uses `swtpm` for realistic TPM behavior
|
||||
- **GitHub Workflow Integration**: All tests run in CI/CD pipeline
|
||||
|
||||
See [`tests/README.md`](tests/README.md) for detailed test documentation and usage instructions.
|
||||
|
||||
### Test Environment Requirements
|
||||
|
||||
- **Real TPM Hardware**: Software TPM simulators may not catch hardware-specific issues
|
||||
- **Kernel Build Pipeline**: Ability to test actual kernel upgrades and PCR changes
|
||||
- **Multi-Node Clusters**: Test distributed scenarios and namespace isolation
|
||||
- **Network Partitioning**: Test resilience under network failures
|
||||
- **Performance Monitoring**: Metrics collection for scalability validation
|
||||
|
||||
### Success Criteria
|
||||
|
||||
All E2E tests must pass consistently across:
|
||||
- Different hardware configurations (various TPM chips)
|
||||
- Multiple kernel versions (to test PCR 11 variability)
|
||||
- Various cluster configurations (single-node, multi-node)
|
||||
- Different load conditions (single client, concurrent clients)
|
||||
|
||||
Completing this E2E test suite will provide confidence that the selective enrollment system works reliably in production environments.
|
||||
|
@@ -23,11 +23,39 @@ import (
|
||||
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
|
||||
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
|
||||
|
||||
// PCRValues represents Platform Configuration Register values for boot state verification
|
||||
// Uses a flexible map where keys are PCR indices (as strings) and values are hex-encoded PCR values
|
||||
type PCRValues struct {
|
||||
// PCRs is a flexible map of PCR index (as string) to PCR value (hex-encoded)
|
||||
// Example: {"0": "a1b2c3...", "7": "d4e5f6...", "11": "g7h8i9..."}
|
||||
// This allows for any combination of PCRs without hardcoding specific indices
|
||||
PCRs map[string]string `json:"pcrs,omitempty"`
|
||||
}
|
||||
|
||||
// AttestationSpec defines TPM attestation data for TOFU enrollment and verification
|
||||
type AttestationSpec struct {
|
||||
// EKPublicKey stores the Endorsement Key public key in PEM format
|
||||
EKPublicKey string `json:"ekPublicKey,omitempty"`
|
||||
|
||||
// AKPublicKey stores the Attestation Key public key in PEM format
|
||||
AKPublicKey string `json:"akPublicKey,omitempty"`
|
||||
|
||||
// PCRValues stores the expected PCR values for boot state verification
|
||||
PCRValues *PCRValues `json:"pcrValues,omitempty"`
|
||||
|
||||
// EnrolledAt timestamp when this TPM was first enrolled
|
||||
EnrolledAt *metav1.Time `json:"enrolledAt,omitempty"`
|
||||
|
||||
// LastVerifiedAt timestamp of the last successful attestation
|
||||
LastVerifiedAt *metav1.Time `json:"lastVerifiedAt,omitempty"`
|
||||
}
|
||||
|
||||
// SealedVolumeSpec defines the desired state of SealedVolume
|
||||
type SealedVolumeSpec struct {
|
||||
TPMHash string `json:"TPMHash,omitempty"`
|
||||
Partitions []PartitionSpec `json:"partitions,omitempty"`
|
||||
Quarantined bool `json:"quarantined,omitempty"`
|
||||
TPMHash string `json:"TPMHash,omitempty"`
|
||||
Partitions []PartitionSpec `json:"partitions,omitempty"`
|
||||
Quarantined bool `json:"quarantined,omitempty"`
|
||||
Attestation *AttestationSpec `json:"attestation,omitempty"`
|
||||
}
|
||||
|
||||
// PartitionSpec defines a Partition. A partition can be identified using
|
||||
|
@@ -25,6 +25,56 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AttestationSpec) DeepCopyInto(out *AttestationSpec) {
|
||||
*out = *in
|
||||
if in.PCRValues != nil {
|
||||
in, out := &in.PCRValues, &out.PCRValues
|
||||
*out = new(PCRValues)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.EnrolledAt != nil {
|
||||
in, out := &in.EnrolledAt, &out.EnrolledAt
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.LastVerifiedAt != nil {
|
||||
in, out := &in.LastVerifiedAt, &out.LastVerifiedAt
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttestationSpec.
|
||||
func (in *AttestationSpec) DeepCopy() *AttestationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AttestationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PCRValues) DeepCopyInto(out *PCRValues) {
|
||||
*out = *in
|
||||
if in.PCRs != nil {
|
||||
in, out := &in.PCRs, &out.PCRs
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PCRValues.
|
||||
func (in *PCRValues) DeepCopy() *PCRValues {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PCRValues)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PartitionSpec) DeepCopyInto(out *PartitionSpec) {
|
||||
*out = *in
|
||||
@@ -114,6 +164,11 @@ func (in *SealedVolumeSpec) DeepCopyInto(out *SealedVolumeSpec) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Attestation != nil {
|
||||
in, out := &in.Attestation, &out.Attestation
|
||||
*out = new(AttestationSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolumeSpec.
|
||||
|
374
cmd/discovery/cli_test.go
Normal file
374
cmd/discovery/cli_test.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestCLI(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Discovery CLI Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("CLI Interface", func() {
|
||||
BeforeEach(func() {
|
||||
// Clean up any previous log files
|
||||
_ = os.Remove("/tmp/kcrypt-challenger-client.log")
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Clean up log files
|
||||
_ = os.Remove("/tmp/kcrypt-challenger-client.log")
|
||||
})
|
||||
|
||||
Context("CLI help", func() {
|
||||
It("should show help when --help is used", func() {
|
||||
err := ExecuteWithArgs([]string{"--help"})
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
// We can't easily test the output content without complex output capture,
|
||||
// but we can verify the function executes without error
|
||||
})
|
||||
})
|
||||
|
||||
Context("Input validation", func() {
|
||||
It("should require all partition parameters for get command", func() {
|
||||
err := ExecuteWithArgs([]string{"get"})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
// Should return an error when required parameters are missing
|
||||
})
|
||||
|
||||
It("should validate that all required fields are provided for get command", func() {
|
||||
// Test with valid partition parameters
|
||||
err := ExecuteWithArgs([]string{"get", "--partition-name=/dev/sda2"})
|
||||
Expect(err).To(HaveOccurred()) // Should fail at client connection but parsing should work
|
||||
|
||||
// Test with valid UUID
|
||||
err = ExecuteWithArgs([]string{"get", "--partition-uuid=12345"})
|
||||
Expect(err).To(HaveOccurred()) // Should fail at client connection but parsing should work
|
||||
})
|
||||
|
||||
It("should handle invalid flags gracefully", func() {
|
||||
err := ExecuteWithArgs([]string{"--invalid-flag"})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
// Should return an error for invalid flags
|
||||
})
|
||||
})
|
||||
|
||||
Context("Flow detection and backend integration", func() {
|
||||
It("should attempt to get passphrase with valid parameters", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid-12345",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// We expect this to fail since there's no server, but it should reach the backend logic
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Should show flow detection in the log (if created)
|
||||
logContent, readErr := os.ReadFile("/tmp/kcrypt-challenger-client.log")
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should contain flow detection message
|
||||
Expect(logStr).To(ContainSubstring("flow"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should use the correct backend client logic", func() {
|
||||
// Test that the CLI mode uses the same GetPassphrase method
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail but attempt to use the client
|
||||
Expect(err).To(HaveOccurred())
|
||||
// The important thing is that it reaches the backend and doesn't crash
|
||||
})
|
||||
})
|
||||
|
||||
Context("Configuration overrides with debug logging", func() {
|
||||
var tempDir string
|
||||
var originalLogFile string
|
||||
var testLogFile string
|
||||
var configDir string
|
||||
|
||||
BeforeEach(func() {
|
||||
// Create a temporary directory for this test
|
||||
var err error
|
||||
tempDir, err = os.MkdirTemp("", "kcrypt-test-*")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Use /tmp/oem since it's already in confScanDirs
|
||||
configDir = "/tmp/oem"
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create a test configuration file with known values
|
||||
configContent := `kcrypt:
|
||||
challenger:
|
||||
challenger_server: "https://default-server.com:8080"
|
||||
mdns: false
|
||||
certificate: "/default/path/to/cert.pem"
|
||||
nv_index: "0x1500000"
|
||||
c_index: "0x1400000"
|
||||
tpm_device: "/dev/tpm0"
|
||||
`
|
||||
configFile := filepath.Join(configDir, "kairos.yaml")
|
||||
err = os.WriteFile(configFile, []byte(configContent), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Override the log file location for testing
|
||||
originalLogFile = os.Getenv("KAIROS_LOG_FILE")
|
||||
testLogFile = filepath.Join(tempDir, "kcrypt-discovery-challenger.log")
|
||||
os.Setenv("KAIROS_LOG_FILE", testLogFile)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Restore original log file setting
|
||||
if originalLogFile != "" {
|
||||
os.Setenv("KAIROS_LOG_FILE", originalLogFile)
|
||||
} else {
|
||||
os.Unsetenv("KAIROS_LOG_FILE")
|
||||
}
|
||||
|
||||
// Clean up config file
|
||||
_ = os.RemoveAll(configDir)
|
||||
|
||||
// Clean up temporary directory
|
||||
_ = os.RemoveAll(tempDir)
|
||||
})
|
||||
|
||||
It("should read and use original configuration values without overrides", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but config parsing should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that original configuration values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration values from the file
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080"))
|
||||
Expect(logStr).To(ContainSubstring("false")) // mdns value
|
||||
Expect(logStr).To(ContainSubstring("/default/path/to/cert.pem"))
|
||||
// Should also show final configuration (which should be the same as original)
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
// Should NOT contain any override messages since no flags were provided
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should show configuration file values being overridden by CLI flags", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://overridden-server.com:9999",
|
||||
"--mdns=true",
|
||||
"--certificate=/overridden/cert.pem",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but config parsing and overrides should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that both original and overridden values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration values from the file
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080"))
|
||||
Expect(logStr).To(ContainSubstring("/default/path/to/cert.pem"))
|
||||
|
||||
// Should show override messages
|
||||
Expect(logStr).To(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080 -> https://overridden-server.com:9999"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).To(ContainSubstring("false -> true"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding certificate"))
|
||||
|
||||
// Should show final configuration with overridden values
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://overridden-server.com:9999"))
|
||||
Expect(logStr).To(ContainSubstring("/overridden/cert.pem"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should apply CLI flag overrides and log configuration changes", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://custom-server.com:8082",
|
||||
"--mdns=true",
|
||||
"--certificate=/path/to/cert.pem",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but flag parsing should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check if debug log exists and contains configuration information
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should contain debug information about configuration overrides
|
||||
Expect(logStr).To(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).To(ContainSubstring("https://custom-server.com:8082"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should show original vs final configuration in debug mode", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://override-server.com:9999",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail but debug information should be logged
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check for original and final configuration logging
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://override-server.com:9999"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should log partition details in debug mode", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/custom-partition",
|
||||
"--partition-uuid=custom-uuid-123",
|
||||
"--partition-label=custom-label-456",
|
||||
"--debug",
|
||||
"--attempts=2",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check for partition details in debug log
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
Expect(logStr).To(ContainSubstring("Partition details"))
|
||||
Expect(logStr).To(ContainSubstring("/dev/custom-partition"))
|
||||
Expect(logStr).To(ContainSubstring("custom-uuid-123"))
|
||||
Expect(logStr).To(ContainSubstring("custom-label-456"))
|
||||
Expect(logStr).To(ContainSubstring("Attempts: 2"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should not log debug information without debug flag", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Debug log should not exist or should not contain detailed debug info
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should not contain debug-level details
|
||||
Expect(logStr).NotTo(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Partition details"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should handle missing configuration file gracefully and show defaults", func() {
|
||||
// Remove the config file to test default behavior
|
||||
_ = os.RemoveAll(configDir)
|
||||
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but not due to config parsing
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that default/empty configuration values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration (which should be empty/defaults)
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
// Should NOT contain override messages since no flags were provided
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("CLI argument parsing", func() {
|
||||
It("should parse all arguments correctly", func() {
|
||||
// This will fail at the client creation/server connection,
|
||||
// but should successfully parse all arguments
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/custom",
|
||||
"--partition-uuid=custom-uuid-999",
|
||||
"--partition-label=custom-label",
|
||||
"--attempts=5",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred()) // Fails due to no server
|
||||
// The important thing is that flag parsing worked and it reached the backend
|
||||
})
|
||||
|
||||
It("should handle boolean flags correctly", func() {
|
||||
// Test help flag
|
||||
err := ExecuteWithArgs([]string{"--help"})
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
@@ -1,40 +1,53 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-attestation/attest"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/payload"
|
||||
"github.com/kairos-io/kcrypt/pkg/bus"
|
||||
"github.com/kairos-io/kairos-sdk/kcrypt/bus"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/go-pluggable"
|
||||
"github.com/mudler/yip/pkg/utils"
|
||||
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
)
|
||||
|
||||
// Retry delays for different failure types
|
||||
const (
|
||||
TPMRetryDelay = 100 * time.Millisecond // Brief delay for TPM hardware busy/unavailable
|
||||
NetworkRetryDelay = 1 * time.Second // Longer delay for network/server issues
|
||||
)
|
||||
|
||||
var errPartNotFound error = fmt.Errorf("pass for partition not found")
|
||||
var errBadCertificate error = fmt.Errorf("unknown certificate")
|
||||
|
||||
func NewClient() (*Client, error) {
|
||||
return NewClientWithLogger(types.NewKairosLogger("kcrypt-challenger-client", "error", false))
|
||||
}
|
||||
|
||||
func NewClientWithLogger(logger types.KairosLogger) (*Client, error) {
|
||||
conf, err := unmarshalConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Client{Config: conf}, nil
|
||||
return &Client{Config: conf, Logger: logger}, nil
|
||||
}
|
||||
|
||||
// ❯ echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
|
||||
func (c *Client) Start() error {
|
||||
func (c *Client) Start(eventType pluggable.EventType) error {
|
||||
factory := pluggable.NewPluginFactory()
|
||||
|
||||
// Input: bus.EventInstallPayload
|
||||
// Expected output: map[string]string{}
|
||||
factory.Add(bus.EventDiscoveryPassword, func(e *pluggable.Event) pluggable.EventResponse {
|
||||
|
||||
b := &block.Partition{}
|
||||
err := json.Unmarshal([]byte(e.Data), b)
|
||||
if err != nil {
|
||||
@@ -43,7 +56,8 @@ func (c *Client) Start() error {
|
||||
}
|
||||
}
|
||||
|
||||
pass, err := c.waitPass(b, 30)
|
||||
// Use the extracted core logic
|
||||
pass, err := c.GetPassphrase(b, 30)
|
||||
if err != nil {
|
||||
return pluggable.EventResponse{
|
||||
Error: fmt.Sprintf("failed getting pass: %s", err.Error()),
|
||||
@@ -55,65 +69,210 @@ func (c *Client) Start() error {
|
||||
}
|
||||
})
|
||||
|
||||
return factory.Run(pluggable.EventType(os.Args[1]), os.Stdin, os.Stdout)
|
||||
return factory.Run(eventType, os.Stdin, c.Logger)
|
||||
}
|
||||
|
||||
func (c *Client) generatePass(postEndpoint string, p *block.Partition) error {
|
||||
// ❯ echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
|
||||
// GetPassphrase retrieves a passphrase for the given partition - core business logic
|
||||
func (c *Client) GetPassphrase(partition *block.Partition, attempts int) (string, error) {
|
||||
serverURL := c.Config.Kcrypt.Challenger.Server
|
||||
|
||||
rand := utils.RandomString(32)
|
||||
pass, err := tpm.EncryptBlob([]byte(rand))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bpass := base64.RawURLEncoding.EncodeToString(pass)
|
||||
|
||||
opts := []tpm.Option{
|
||||
tpm.WithAdditionalHeader("label", p.Label),
|
||||
tpm.WithAdditionalHeader("name", p.Name),
|
||||
tpm.WithAdditionalHeader("uuid", p.UUID),
|
||||
}
|
||||
conn, err := tpm.Connection(postEndpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return conn.WriteJSON(payload.Data{Passphrase: bpass, GeneratedBy: constants.TPMSecret})
|
||||
}
|
||||
|
||||
func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err error) {
|
||||
// IF we don't have any server configured, just do local
|
||||
if c.Config.Kcrypt.Challenger.Server == "" {
|
||||
// If we don't have any server configured, just do local
|
||||
if serverURL == "" {
|
||||
return localPass(c.Config)
|
||||
}
|
||||
|
||||
challengeEndpoint := fmt.Sprintf("%s/getPass", c.Config.Kcrypt.Challenger.Server)
|
||||
postEndpoint := fmt.Sprintf("%s/postPass", c.Config.Kcrypt.Challenger.Server)
|
||||
|
||||
for tries := 0; tries < attempts; tries++ {
|
||||
var generated bool
|
||||
pass, generated, err = getPass(challengeEndpoint, p)
|
||||
if err == errPartNotFound {
|
||||
// IF server doesn't have a pass for us, then we generate one and we set it
|
||||
err = c.generatePass(postEndpoint, p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Attempt to fetch again - validate that the server has it now
|
||||
tries = 0
|
||||
continue
|
||||
additionalHeaders := map[string]string{}
|
||||
var err error
|
||||
if c.Config.Kcrypt.Challenger.MDNS {
|
||||
serverURL, additionalHeaders, err = queryMDNS(serverURL, c.Logger)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if generated { // passphrase is encrypted
|
||||
return c.decryptPassphrase(pass)
|
||||
}
|
||||
|
||||
if err == nil || err == errPartNotFound { // passphrase not encrypted or not available
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second) // network errors? retry
|
||||
}
|
||||
|
||||
return
|
||||
c.Logger.Debugf("Starting TPM attestation flow with server: %s", serverURL)
|
||||
return c.waitPassWithTPMAttestation(serverURL, additionalHeaders, partition, attempts)
|
||||
}
|
||||
|
||||
// waitPassWithTPMAttestation implements the new TPM remote attestation flow over WebSocket
|
||||
func (c *Client) waitPassWithTPMAttestation(serverURL string, additionalHeaders map[string]string, p *block.Partition, attempts int) (string, error) {
|
||||
attestationEndpoint := fmt.Sprintf("%s/tpm-attestation", serverURL)
|
||||
c.Logger.Debugf("Debug: TPM attestation endpoint: %s", attestationEndpoint)
|
||||
|
||||
for tries := 0; tries < attempts; tries++ {
|
||||
c.Logger.Debugf("Debug: TPM attestation attempt %d/%d", tries+1, attempts)
|
||||
|
||||
// Step 1: Initialize AK Manager
|
||||
c.Logger.Debugf("Debug: Initializing AK Manager with handle file: %s", constants.AKBlobFile)
|
||||
akManager, err := tpm.NewAKManager(tpm.WithAKHandleFile(constants.AKBlobFile))
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed to create AK manager: %v", err)
|
||||
time.Sleep(TPMRetryDelay)
|
||||
continue
|
||||
}
|
||||
c.Logger.Debugf("Debug: AK Manager initialized successfully")
|
||||
|
||||
// Step 2: Ensure AK exists
|
||||
c.Logger.Debugf("Debug: Getting or creating AK")
|
||||
_, err = akManager.GetOrCreateAK()
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed to get/create AK: %v", err)
|
||||
time.Sleep(TPMRetryDelay)
|
||||
continue
|
||||
}
|
||||
c.Logger.Debugf("Debug: AK obtained/created successfully")
|
||||
|
||||
// Step 3: Start WebSocket-based attestation flow
|
||||
c.Logger.Debugf("Debug: Starting WebSocket-based attestation flow")
|
||||
passphrase, err := c.performTPMAttestation(attestationEndpoint, additionalHeaders, akManager, p)
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed TPM attestation: %v", err)
|
||||
time.Sleep(NetworkRetryDelay)
|
||||
continue
|
||||
}
|
||||
|
||||
return passphrase, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("exhausted all attempts (%d) for TPM attestation", attempts)
|
||||
}
|
||||
|
||||
// performTPMAttestation handles the complete attestation flow over a single WebSocket connection
|
||||
func (c *Client) performTPMAttestation(endpoint string, additionalHeaders map[string]string, akManager *tpm.AKManager, p *block.Partition) (string, error) {
|
||||
c.Logger.Debugf("Debug: Creating WebSocket connection to endpoint: %s", endpoint)
|
||||
c.Logger.Debugf("Debug: Partition details - Label: %s, Name: %s, UUID: %s", p.FilesystemLabel, p.Name, p.UUID)
|
||||
c.Logger.Debugf("Debug: Certificate length: %d", len(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
// Create WebSocket connection
|
||||
opts := []tpm.Option{
|
||||
tpm.WithAdditionalHeader("label", p.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", p.Name),
|
||||
tpm.WithAdditionalHeader("uuid", p.UUID),
|
||||
}
|
||||
|
||||
// Only add certificate options if a certificate is provided
|
||||
if len(c.Config.Kcrypt.Challenger.Certificate) > 0 {
|
||||
c.Logger.Debugf("Debug: Adding certificate validation options")
|
||||
opts = append(opts,
|
||||
tpm.WithCAs([]byte(c.Config.Kcrypt.Challenger.Certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
)
|
||||
} else {
|
||||
c.Logger.Debugf("Debug: No certificate provided, using insecure connection")
|
||||
}
|
||||
for k, v := range additionalHeaders {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
c.Logger.Debugf("Debug: WebSocket options configured, attempting connection...")
|
||||
|
||||
// Add connection timeout to prevent hanging indefinitely
|
||||
type connectionResult struct {
|
||||
conn interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
done := make(chan connectionResult, 1)
|
||||
|
||||
go func() {
|
||||
c.Logger.Debugf("Debug: Using tpm.AttestationConnection for new TPM flow")
|
||||
conn, err := tpm.AttestationConnection(endpoint, opts...)
|
||||
c.Logger.Debugf("Debug: tpm.AttestationConnection returned with err: %v", err)
|
||||
done <- connectionResult{conn: conn, err: err}
|
||||
}()
|
||||
|
||||
var conn *websocket.Conn
|
||||
select {
|
||||
case result := <-done:
|
||||
if result.err != nil {
|
||||
c.Logger.Debugf("Debug: WebSocket connection failed: %v", result.err)
|
||||
return "", fmt.Errorf("creating WebSocket connection: %w", result.err)
|
||||
}
|
||||
var ok bool
|
||||
conn, ok = result.conn.(*websocket.Conn)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected connection type")
|
||||
}
|
||||
c.Logger.Debugf("Debug: WebSocket connection established successfully")
|
||||
case <-time.After(10 * time.Second):
|
||||
c.Logger.Debugf("Debug: WebSocket connection timed out after 10 seconds")
|
||||
return "", fmt.Errorf("WebSocket connection timed out")
|
||||
}
|
||||
|
||||
defer conn.Close() //nolint:errcheck
|
||||
|
||||
// Protocol Step 1: Send attestation data (EK + AK) to server so it can generate proper challenge
|
||||
c.Logger.Debugf("Debug: Getting attestation data for challenge generation")
|
||||
ek, akParams, err := akManager.GetAttestationData()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting attestation data: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Got EK and AK attestation data")
|
||||
|
||||
// Serialize EK to bytes using the existing encoding from tmp-helpers
|
||||
ekPEM, err := encodeEKToBytes(ek)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encoding EK to bytes: %w", err)
|
||||
}
|
||||
|
||||
// Serialize AK parameters to JSON bytes
|
||||
akBytes, err := json.Marshal(akParams)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling AK parameters: %w", err)
|
||||
}
|
||||
|
||||
// Send attestation data to server as bytes
|
||||
attestationData := struct {
|
||||
EKBytes []byte `json:"ek_bytes"`
|
||||
AKBytes []byte `json:"ak_bytes"`
|
||||
}{
|
||||
EKBytes: ekPEM,
|
||||
AKBytes: akBytes,
|
||||
}
|
||||
|
||||
c.Logger.Debugf("Debug: Sending attestation data to server")
|
||||
if err := conn.WriteJSON(attestationData); err != nil {
|
||||
return "", fmt.Errorf("sending attestation data: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Attestation data sent successfully")
|
||||
|
||||
// Protocol Step 2: Wait for challenge response from server
|
||||
c.Logger.Debugf("Debug: Waiting for challenge from server")
|
||||
var challengeResp tpm.AttestationChallengeResponse
|
||||
if err := conn.ReadJSON(&challengeResp); err != nil {
|
||||
return "", fmt.Errorf("reading challenge from server: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Challenge received")
|
||||
|
||||
// Protocol Step 3: Create proof request using AK Manager
|
||||
c.Logger.Debugf("Debug: Creating proof request from challenge response")
|
||||
proofReq, err := akManager.CreateProofRequest(&challengeResp)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating proof request: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Proof request created successfully")
|
||||
|
||||
// Protocol Step 4: Send proof to server
|
||||
c.Logger.Debugf("Debug: Sending proof request to server")
|
||||
if err := conn.WriteJSON(proofReq); err != nil {
|
||||
return "", fmt.Errorf("sending proof request: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Proof request sent")
|
||||
|
||||
// Protocol Step 5: Receive passphrase from server
|
||||
c.Logger.Debugf("Debug: Waiting for passphrase response")
|
||||
var proofResp tpm.ProofResponse
|
||||
if err := conn.ReadJSON(&proofResp); err != nil {
|
||||
return "", fmt.Errorf("reading passphrase response: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Passphrase received - Length: %d bytes", len(proofResp.Passphrase))
|
||||
|
||||
// Check if we received an empty passphrase (indicates server error)
|
||||
if len(proofResp.Passphrase) == 0 {
|
||||
return "", fmt.Errorf("server returned empty passphrase, indicating an error occurred during attestation")
|
||||
}
|
||||
|
||||
return string(proofResp.Passphrase), nil
|
||||
}
|
||||
|
||||
// decryptPassphrase decodes (base64) and decrypts the passphrase returned
|
||||
@@ -136,3 +295,26 @@ func (c *Client) decryptPassphrase(pass string) (string, error) {
|
||||
|
||||
return string(passBytes), err
|
||||
}
|
||||
|
||||
// encodeEKToBytes encodes an EK to PEM bytes for transmission
|
||||
func encodeEKToBytes(ek *attest.EK) ([]byte, error) {
|
||||
if ek.Certificate != nil {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: ek.Certificate.Raw,
|
||||
}
|
||||
return pem.EncodeToMemory(pemBlock), nil
|
||||
}
|
||||
|
||||
// For EKs without certificates, marshal the public key
|
||||
pubBytes, err := x509.MarshalPKIXPublicKey(ek.Public)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling EK public key: %w", err)
|
||||
}
|
||||
|
||||
pemBlock := &pem.Block{
|
||||
Type: "PUBLIC KEY",
|
||||
Bytes: pubBytes,
|
||||
}
|
||||
return pem.EncodeToMemory(pemBlock), nil
|
||||
}
|
||||
|
@@ -1,23 +1,33 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"github.com/kairos-io/kairos/pkg/config"
|
||||
kconfig "github.com/kairos-io/kcrypt/pkg/config"
|
||||
"github.com/kairos-io/kairos-sdk/collector"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// There are the directories under which we expect to find kairos configuration.
|
||||
// When we are booted from an iso (during installation), configuration is expected
|
||||
// under `/oem`. When we are booting an installed system (in initramfs phase),
|
||||
// the path is `/sysroot/oem`.
|
||||
// When we run the challenger in hooks, we may have the config under /tmp/oem
|
||||
// During manual install (kairos-agent manual-install), kairos-agent stores config in /run/cos/oem
|
||||
var confScanDirs = []string{"/oem", "/sysroot/oem", "/tmp/oem", "/run/cos/oem"}
|
||||
|
||||
type Client struct {
|
||||
Config Config
|
||||
Logger types.KairosLogger
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Kcrypt struct {
|
||||
Challenger struct {
|
||||
Server string `yaml:"challenger_server,omitempty"`
|
||||
// Non-volatile index memory: where we store the encrypted passphrase (offline mode)
|
||||
NVIndex string `yaml:"nv_index,omitempty"`
|
||||
// Certificate index: this is where the rsa pair that decrypts the passphrase lives
|
||||
CIndex string `yaml:"c_index,omitempty"`
|
||||
TPMDevice string `yaml:"tpm_device,omitempty"`
|
||||
MDNS bool `yaml:"mdns,omitempty"`
|
||||
Server string `yaml:"challenger_server,omitempty"`
|
||||
NVIndex string `yaml:"nv_index,omitempty"` // Non-volatile index memory: where we store the encrypted passphrase (offline mode)
|
||||
CIndex string `yaml:"c_index,omitempty"` // Certificate index: this is where the rsa pair that decrypts the passphrase lives
|
||||
TPMDevice string `yaml:"tpm_device,omitempty"`
|
||||
Certificate string `yaml:"certificate,omitempty"`
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -25,12 +35,21 @@ type Config struct {
|
||||
func unmarshalConfig() (Config, error) {
|
||||
var result Config
|
||||
|
||||
c, err := config.Scan(config.Directories(kconfig.ConfigScanDirs...), config.NoLogs)
|
||||
o := &collector.Options{NoLogs: true, MergeBootCMDLine: false}
|
||||
if err := o.Apply(collector.Directories(confScanDirs...)); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
c, err := collector.Scan(o, func(d []byte) ([]byte, error) {
|
||||
return d, nil
|
||||
})
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
if err = c.Unmarshal(&result); err != nil {
|
||||
a, _ := c.String()
|
||||
err = yaml.Unmarshal([]byte(a), &result)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
|
@@ -1,47 +1,12 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/payload"
|
||||
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/yip/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const DefaultNVIndex = "0x1500000"
|
||||
|
||||
func getPass(server string, partition *block.Partition) (string, bool, error) {
|
||||
msg, err := tpm.Get(server,
|
||||
tpm.WithAdditionalHeader("label", partition.Label),
|
||||
tpm.WithAdditionalHeader("name", partition.Name),
|
||||
tpm.WithAdditionalHeader("uuid", partition.UUID))
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
result := payload.Data{}
|
||||
err = json.Unmarshal(msg, &result)
|
||||
if err != nil {
|
||||
return "", false, errors.Wrap(err, string(msg))
|
||||
}
|
||||
|
||||
if result.HasPassphrase() {
|
||||
return fmt.Sprint(result.Passphrase), result.HasBeenGenerated() && result.GeneratedBy == constants.TPMSecret, nil
|
||||
} else if result.HasError() {
|
||||
if strings.Contains(result.Error, "No secret found for") {
|
||||
return "", false, errPartNotFound
|
||||
}
|
||||
return "", false, fmt.Errorf(result.Error)
|
||||
}
|
||||
|
||||
return "", false, errPartNotFound
|
||||
}
|
||||
|
||||
func genAndStore(k Config) (string, error) {
|
||||
opts := []tpm.TPMOption{}
|
||||
if k.Kcrypt.Challenger.TPMDevice != "" {
|
||||
|
47
cmd/discovery/client/flow_test.go
Normal file
47
cmd/discovery/client/flow_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Discovery Client Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("Flow Detection", func() {
|
||||
var client *Client
|
||||
|
||||
BeforeEach(func() {
|
||||
// Create a test client with basic config and logger
|
||||
client = &Client{}
|
||||
client.Config.Kcrypt.Challenger.Server = "http://test-server.local"
|
||||
client.Logger = types.NewKairosLogger("test-client", "debug", false)
|
||||
})
|
||||
|
||||
Context("TPM attestation capabilities", func() {
|
||||
It("should handle TPM operations", func() {
|
||||
// Test that client can be created without errors
|
||||
// TPM availability testing requires actual hardware
|
||||
Expect(client).ToNot(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Logging functionality", func() {
|
||||
It("should have a valid logger", func() {
|
||||
// Test that client has a valid logger
|
||||
Expect(client.Logger).NotTo(BeNil())
|
||||
|
||||
// Test debug logging works without error
|
||||
client.Logger.Debugf("Test log entry for flow detection")
|
||||
|
||||
// If we get here without panic, logging is working
|
||||
Expect(true).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
})
|
86
cmd/discovery/client/mdns.go
Normal file
86
cmd/discovery/client/mdns.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/mdns"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
)
|
||||
|
||||
const (
|
||||
MDNSServiceType = "_kcrypt._tcp"
|
||||
MDNSTimeout = 15 * time.Second
|
||||
)
|
||||
|
||||
// queryMDNS will make an mdns query on local network to find a kcrypt challenger server
|
||||
// instance. If none is found, the original URL is returned and no additional headers.
|
||||
// If a response is received, the IP address and port from the response will be returned// and an additional "Host" header pointing to the original host.
|
||||
func queryMDNS(originalURL string, logger types.KairosLogger) (string, map[string]string, error) {
|
||||
additionalHeaders := map[string]string{}
|
||||
var err error
|
||||
|
||||
parsedURL, err := url.Parse(originalURL)
|
||||
if err != nil {
|
||||
return originalURL, additionalHeaders, fmt.Errorf("parsing the original host: %w", err)
|
||||
}
|
||||
|
||||
host := parsedURL.Host
|
||||
if !strings.HasSuffix(host, ".local") { // sanity check
|
||||
return "", additionalHeaders, fmt.Errorf("domain should end in \".local\" when using mdns")
|
||||
}
|
||||
|
||||
mdnsIP, mdnsPort := discoverMDNSServer(host, logger)
|
||||
if mdnsIP == "" { // no reply
|
||||
logger.Debugf("no reply from mdns")
|
||||
return originalURL, additionalHeaders, nil
|
||||
}
|
||||
|
||||
additionalHeaders["Host"] = parsedURL.Host
|
||||
newURL := strings.ReplaceAll(originalURL, host, mdnsIP)
|
||||
// Remove any port in the original url
|
||||
if port := parsedURL.Port(); port != "" {
|
||||
newURL = strings.ReplaceAll(newURL, port, "")
|
||||
}
|
||||
|
||||
// Add any possible port from the mdns response
|
||||
if mdnsPort != "" {
|
||||
newURL = strings.ReplaceAll(newURL, mdnsIP, fmt.Sprintf("%s:%s", mdnsIP, mdnsPort))
|
||||
}
|
||||
|
||||
return newURL, additionalHeaders, nil
|
||||
}
|
||||
|
||||
// discoverMDNSServer performs an mDNS query to discover any running kcrypt challenger
|
||||
// servers on the same network that matches the given hostname.
|
||||
// If a response if received, the IP address and the Port from the response are returned.
|
||||
func discoverMDNSServer(hostname string, logger types.KairosLogger) (string, string) {
|
||||
// Make a channel for results and start listening
|
||||
entriesCh := make(chan *mdns.ServiceEntry, 4)
|
||||
defer close(entriesCh)
|
||||
|
||||
logger.Debugf("Will now wait for some mdns server to respond")
|
||||
// Start the lookup. It will block until we read from the chan.
|
||||
mdns.Lookup(MDNSServiceType, entriesCh)
|
||||
|
||||
expectedHost := hostname + "." // FQDN
|
||||
// Wait until a matching server is found or we reach a timeout
|
||||
for {
|
||||
select {
|
||||
case entry := <-entriesCh:
|
||||
logger.Debugf("mdns response received")
|
||||
if entry.Host == expectedHost {
|
||||
logger.Debugf("%s matches %s", entry.Host, expectedHost)
|
||||
return entry.AddrV4.String(), strconv.Itoa(entry.Port) // TODO: v6?
|
||||
} else {
|
||||
logger.Debugf("%s didn't match %s", entry.Host, expectedHost)
|
||||
}
|
||||
case <-time.After(MDNSTimeout):
|
||||
logger.Debugf("timed out waiting for mdns")
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,30 +1,480 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/kairos-challenger/cmd/discovery/client"
|
||||
"github.com/kairos-io/kcrypt/pkg/bus"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-sdk/kcrypt/bus"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/go-pluggable"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) >= 2 && bus.IsEventDefined(os.Args[1]) {
|
||||
c, err := client.NewClient()
|
||||
checkErr(err)
|
||||
checkErr(c.Start())
|
||||
return
|
||||
}
|
||||
|
||||
pubhash, err := tpm.GetPubHash()
|
||||
checkErr(err)
|
||||
fmt.Print(pubhash)
|
||||
// GetFlags holds all flags specific to the get command
|
||||
type GetFlags struct {
|
||||
PartitionName string
|
||||
PartitionUUID string
|
||||
PartitionLabel string
|
||||
Attempts int
|
||||
ChallengerServer string
|
||||
EnableMDNS bool
|
||||
ServerCertificate string
|
||||
}
|
||||
|
||||
func checkErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
var (
|
||||
// Global/persistent flags
|
||||
debug bool
|
||||
)
|
||||
|
||||
// rootCmd represents the base command (TPM hash generation)
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "kcrypt-discovery-challenger",
|
||||
Short: "kcrypt-challenger discovery client",
|
||||
Long: `kcrypt-challenger discovery client
|
||||
|
||||
This tool provides TPM-based operations for encrypted partition management.
|
||||
By default, it outputs the TPM hash for this device.
|
||||
|
||||
Configuration:
|
||||
The client reads configuration from Kairos configuration files in the following directories:
|
||||
- /oem (during installation from ISO)
|
||||
- /sysroot/oem (on installed systems during initramfs)
|
||||
- /tmp/oem (when running in hooks)
|
||||
|
||||
Configuration format (YAML):
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "https://my-server.com:8082" # Server URL
|
||||
mdns: true # Enable mDNS discovery
|
||||
certificate: "/path/to/server-cert.pem" # Server certificate
|
||||
nv_index: "0x1500000" # TPM NV index (offline mode)
|
||||
c_index: "0x1500001" # TPM certificate index
|
||||
tpm_device: "/dev/tpmrm0" # TPM device path`,
|
||||
Example: ` # Get TPM hash for this device (default)
|
||||
kcrypt-discovery-challenger
|
||||
|
||||
# Get passphrase for encrypted partition
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2
|
||||
|
||||
# Clean up TPM NV memory (useful for development)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Run plugin event
|
||||
kcrypt-discovery-challenger discovery.password`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runTPMHash()
|
||||
},
|
||||
}
|
||||
|
||||
// newCleanupCmd creates the cleanup command
|
||||
func newCleanupCmd() *cobra.Command {
|
||||
var nvIndex string
|
||||
var tpmDevice string
|
||||
var skipConfirmation bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "cleanup",
|
||||
Short: "Clean up TPM NV memory",
|
||||
Long: `Clean up TPM NV memory by undefining specific NV indices.
|
||||
|
||||
⚠️ DANGER: This command removes encryption passphrases from TPM memory!
|
||||
⚠️ If you delete the wrong index, your encrypted disk may become UNBOOTABLE!
|
||||
|
||||
This command helps clean up TPM NV memory used by the local pass flow,
|
||||
which stores encrypted passphrases in TPM non-volatile memory. Without
|
||||
cleanup, these passphrases persist indefinitely and take up space.
|
||||
|
||||
The command will prompt for confirmation before deletion unless you use
|
||||
the --i-know-what-i-am-doing flag to skip the safety prompt.
|
||||
|
||||
Default behavior:
|
||||
- Uses the same NV index as the local pass flow (from config or 0x1500000)
|
||||
- Uses the same TPM device as configured (or system default if none specified)
|
||||
- Prompts for confirmation with safety warnings`,
|
||||
Example: ` # Clean up default NV index (with confirmation prompt)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Clean up specific NV index
|
||||
kcrypt-discovery-challenger cleanup --nv-index=0x1500001
|
||||
|
||||
# Clean up with specific TPM device
|
||||
kcrypt-discovery-challenger cleanup --tpm-device=/dev/tpmrm0
|
||||
|
||||
# Skip confirmation prompt (DANGEROUS!)
|
||||
kcrypt-discovery-challenger cleanup --i-know-what-i-am-doing`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runCleanup(nvIndex, tpmDevice, skipConfirmation)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&nvIndex, "nv-index", "", fmt.Sprintf("NV index to clean up (defaults to configured index or %s)", client.DefaultNVIndex))
|
||||
cmd.Flags().StringVar(&tpmDevice, "tpm-device", "", "TPM device path (defaults to configured device or system default)")
|
||||
cmd.Flags().BoolVar(&skipConfirmation, "i-know-what-i-am-doing", false, "Skip confirmation prompt (DANGEROUS: may make encrypted disks unbootable)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newGetCmd creates the get command with its flags
|
||||
func newGetCmd() *cobra.Command {
|
||||
flags := &GetFlags{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get passphrase for encrypted partition",
|
||||
Long: `Get passphrase for encrypted partition using TPM attestation.
|
||||
|
||||
This command retrieves passphrases for encrypted partitions by communicating
|
||||
with a challenger server using TPM-based attestation. At least one partition
|
||||
identifier (name, UUID, or label) must be provided.
|
||||
|
||||
The command uses configuration from the root command's config files, but flags
|
||||
can override specific settings:
|
||||
--challenger-server Override kcrypt.challenger.challenger_server
|
||||
--mdns Override kcrypt.challenger.mdns
|
||||
--certificate Override kcrypt.challenger.certificate`,
|
||||
Example: ` # Get passphrase using partition name
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2
|
||||
|
||||
# Get passphrase using UUID
|
||||
kcrypt-discovery-challenger get --partition-uuid=12345-abcde
|
||||
|
||||
# Get passphrase using filesystem label
|
||||
kcrypt-discovery-challenger get --partition-label=encrypted-data
|
||||
|
||||
# Get passphrase with multiple identifiers
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data
|
||||
|
||||
# Get passphrase with custom server
|
||||
kcrypt-discovery-challenger get --partition-label=encrypted-data --challenger-server=https://my-server.com:8082`,
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Validate that at least one partition identifier is provided
|
||||
if flags.PartitionName == "" && flags.PartitionUUID == "" && flags.PartitionLabel == "" {
|
||||
return fmt.Errorf("at least one of --partition-name, --partition-uuid, or --partition-label must be provided")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runGetPassphrase(flags)
|
||||
},
|
||||
}
|
||||
|
||||
// Register flags
|
||||
cmd.Flags().StringVar(&flags.PartitionName, "partition-name", "", "Name of the partition (at least one identifier required)")
|
||||
cmd.Flags().StringVar(&flags.PartitionUUID, "partition-uuid", "", "UUID of the partition (at least one identifier required)")
|
||||
cmd.Flags().StringVar(&flags.PartitionLabel, "partition-label", "", "Filesystem label of the partition (at least one identifier required)")
|
||||
cmd.Flags().IntVar(&flags.Attempts, "attempts", 30, "Number of attempts to get the passphrase")
|
||||
cmd.Flags().StringVar(&flags.ChallengerServer, "challenger-server", "", "URL of the challenger server (overrides config)")
|
||||
cmd.Flags().BoolVar(&flags.EnableMDNS, "mdns", false, "Enable mDNS discovery (overrides config)")
|
||||
cmd.Flags().StringVar(&flags.ServerCertificate, "certificate", "", "Server certificate for verification (overrides config)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// pluginCmd represents the plugin event commands
|
||||
var pluginCmd = &cobra.Command{
|
||||
Use: string(bus.EventDiscoveryPassword),
|
||||
Short: fmt.Sprintf("Run %s plugin event", bus.EventDiscoveryPassword),
|
||||
Long: fmt.Sprintf(`Run the %s plugin event.
|
||||
|
||||
This command runs in plugin mode, reading JSON partition data from stdin
|
||||
and outputting the passphrase to stdout. This is used for integration
|
||||
with kcrypt and other tools.`, bus.EventDiscoveryPassword),
|
||||
Example: fmt.Sprintf(` # Plugin mode (for integration with kcrypt)
|
||||
echo '{"data": "{\"name\": \"/dev/sda2\", \"uuid\": \"12345-abcde\", \"label\": \"encrypted-data\"}"}' | kcrypt-discovery-challenger %s`, bus.EventDiscoveryPassword),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runPluginMode(bus.EventDiscoveryPassword)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Global/persistent flags (available to all commands)
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
|
||||
// Add subcommands
|
||||
rootCmd.AddCommand(newGetCmd())
|
||||
rootCmd.AddCommand(newCleanupCmd())
|
||||
rootCmd.AddCommand(pluginCmd)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteWithArgs executes the root command with the given arguments.
|
||||
// This function is used by tests to simulate CLI execution.
|
||||
func ExecuteWithArgs(args []string) error {
|
||||
// Set command arguments (this overrides os.Args)
|
||||
rootCmd.SetArgs(args)
|
||||
|
||||
return rootCmd.Execute()
|
||||
}
|
||||
|
||||
// runTPMHash handles the root command - TPM hash generation
|
||||
func runTPMHash() error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
logger.Debugf("Debug mode enabled for TPM hash generation")
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Initialize AK Manager with the standard handle file
|
||||
logger.Debugf("Initializing AK Manager with handle file: %s", constants.AKBlobFile)
|
||||
akManager, err := tpm.NewAKManager(tpm.WithAKHandleFile(constants.AKBlobFile))
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating AK manager: %w", err)
|
||||
}
|
||||
logger.Debugf("AK Manager initialized successfully")
|
||||
|
||||
// Ensure AK exists (create if necessary)
|
||||
logger.Debugf("Getting or creating AK")
|
||||
_, err = akManager.GetOrCreateAK()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting/creating AK: %w", err)
|
||||
}
|
||||
logger.Debugf("AK obtained/created successfully")
|
||||
|
||||
// Get attestation data (includes EK)
|
||||
logger.Debugf("Getting attestation data")
|
||||
ek, _, err := akManager.GetAttestationData()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting attestation data: %w", err)
|
||||
}
|
||||
logger.Debugf("Attestation data retrieved successfully")
|
||||
|
||||
// Compute TPM hash from EK
|
||||
logger.Debugf("Computing TPM hash from EK")
|
||||
tpmHash, err := tpm.DecodePubHash(ek)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing TPM hash: %w", err)
|
||||
}
|
||||
logger.Debugf("TPM hash computed successfully: %s", tpmHash)
|
||||
|
||||
// Output the TPM hash to stdout
|
||||
fmt.Print(tpmHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// runGetPassphrase handles the get subcommand - passphrase retrieval
|
||||
func runGetPassphrase(flags *GetFlags) error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Create client with potential CLI overrides
|
||||
c, err := createClientWithOverrides(flags.ChallengerServer, flags.EnableMDNS, flags.ServerCertificate, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
// Create partition object
|
||||
partition := &block.Partition{
|
||||
Name: flags.PartitionName,
|
||||
UUID: flags.PartitionUUID,
|
||||
FilesystemLabel: flags.PartitionLabel,
|
||||
}
|
||||
|
||||
// Log partition information
|
||||
logger.Debugf("Partition details:")
|
||||
logger.Debugf(" Name: %s", partition.Name)
|
||||
logger.Debugf(" UUID: %s", partition.UUID)
|
||||
logger.Debugf(" Label: %s", partition.FilesystemLabel)
|
||||
logger.Debugf(" Attempts: %d", flags.Attempts)
|
||||
|
||||
// Get the passphrase using the same backend logic as the plugin
|
||||
fmt.Fprintf(os.Stderr, "Requesting passphrase for partition %s (UUID: %s, Label: %s)...\n",
|
||||
flags.PartitionName, flags.PartitionUUID, flags.PartitionLabel)
|
||||
|
||||
passphrase, err := c.GetPassphrase(partition, flags.Attempts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting passphrase: %w", err)
|
||||
}
|
||||
|
||||
// Output the passphrase to stdout (this is what tools expect)
|
||||
fmt.Print(passphrase)
|
||||
fmt.Fprintf(os.Stderr, "\nPassphrase retrieved successfully\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// runPluginMode handles plugin event commands
|
||||
func runPluginMode(eventType pluggable.EventType) error {
|
||||
// In plugin mode, use quiet=true to log to file instead of console
|
||||
// Log level depends on debug flag, write logs to /var/log/kairos/kcrypt-discovery-challenger.log
|
||||
var logLevel string
|
||||
if debug {
|
||||
logLevel = "debug"
|
||||
} else {
|
||||
logLevel = "error"
|
||||
}
|
||||
|
||||
logger := types.NewKairosLoggerWithExtraDirs("kcrypt-discovery-challenger", logLevel, true, "/var/log/kairos")
|
||||
logger.Debugf("Debug mode enabled for plugin mode")
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
err = c.Start(eventType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting plugin: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createClientWithOverrides creates a client and applies CLI flag overrides to the config
|
||||
func createClientWithOverrides(serverURL string, enableMDNS bool, certificate string, logger types.KairosLogger) (*client.Client, error) {
|
||||
// Start with the default config from files and pass the logger
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Log the original configuration values
|
||||
logger.Debugf("Original configuration:")
|
||||
logger.Debugf(" Server: %s", c.Config.Kcrypt.Challenger.Server)
|
||||
logger.Debugf(" MDNS: %t", c.Config.Kcrypt.Challenger.MDNS)
|
||||
logger.Debugf(" Certificate: %s", maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
// Apply CLI overrides if provided
|
||||
if serverURL != "" {
|
||||
logger.Debugf("Overriding server URL: %s -> %s", c.Config.Kcrypt.Challenger.Server, serverURL)
|
||||
c.Config.Kcrypt.Challenger.Server = serverURL
|
||||
}
|
||||
|
||||
// For boolean flags, we can directly use the value since Cobra handles it properly
|
||||
if enableMDNS {
|
||||
logger.Debugf("Overriding MDNS setting: %t -> %t", c.Config.Kcrypt.Challenger.MDNS, enableMDNS)
|
||||
c.Config.Kcrypt.Challenger.MDNS = enableMDNS
|
||||
}
|
||||
|
||||
if certificate != "" {
|
||||
logger.Debugf("Overriding certificate: %s -> %s",
|
||||
maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate),
|
||||
maskSensitiveString(certificate))
|
||||
c.Config.Kcrypt.Challenger.Certificate = certificate
|
||||
}
|
||||
|
||||
// Log the final configuration values
|
||||
logger.Debugf("Final configuration:")
|
||||
logger.Debugf(" Server: %s", c.Config.Kcrypt.Challenger.Server)
|
||||
logger.Debugf(" MDNS: %t", c.Config.Kcrypt.Challenger.MDNS)
|
||||
logger.Debugf(" Certificate: %s", maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// runCleanup handles the cleanup subcommand - TPM NV memory cleanup
|
||||
func runCleanup(nvIndex, tpmDevice string, skipConfirmation bool) error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
logger.Debugf("Debug mode enabled for TPM NV cleanup")
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Load configuration to get defaults if flags not provided
|
||||
var config client.Config
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
logger.Debugf("Warning: Could not load configuration: %v", err)
|
||||
// Continue with defaults - not a fatal error
|
||||
} else {
|
||||
config = c.Config
|
||||
}
|
||||
|
||||
// Determine NV index to clean up (follow same pattern as localPass/genAndStore)
|
||||
targetIndex := nvIndex
|
||||
if targetIndex == "" {
|
||||
// First check config, then fall back to the same default used by the local pass flow
|
||||
if config.Kcrypt.Challenger.NVIndex != "" {
|
||||
targetIndex = config.Kcrypt.Challenger.NVIndex
|
||||
} else {
|
||||
targetIndex = client.DefaultNVIndex
|
||||
}
|
||||
}
|
||||
|
||||
// Determine TPM device
|
||||
targetDevice := tpmDevice
|
||||
if targetDevice == "" && config.Kcrypt.Challenger.TPMDevice != "" {
|
||||
targetDevice = config.Kcrypt.Challenger.TPMDevice
|
||||
}
|
||||
|
||||
logger.Debugf("Cleaning up TPM NV index: %s", targetIndex)
|
||||
if targetDevice != "" {
|
||||
logger.Debugf("Using TPM device: %s", targetDevice)
|
||||
}
|
||||
|
||||
// Check if the NV index exists first
|
||||
opts := []tpm.TPMOption{tpm.WithIndex(targetIndex)}
|
||||
if targetDevice != "" {
|
||||
opts = append(opts, tpm.WithDevice(targetDevice))
|
||||
}
|
||||
|
||||
// Try to read from the index to see if it exists
|
||||
logger.Debugf("Checking if NV index %s exists", targetIndex)
|
||||
_, err = tpm.ReadBlob(opts...)
|
||||
if err != nil {
|
||||
// If we can't read it, it might not exist or be empty
|
||||
logger.Debugf("NV index %s appears to be empty or non-existent: %v", targetIndex, err)
|
||||
fmt.Printf("NV index %s appears to be empty or does not exist\n", targetIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Confirmation prompt with warning
|
||||
if !skipConfirmation {
|
||||
fmt.Printf("\n⚠️ WARNING: You are about to delete TPM NV index %s\n", targetIndex)
|
||||
fmt.Printf("⚠️ If this index contains your disk encryption passphrase, your encrypted disk will become UNBOOTABLE!\n")
|
||||
fmt.Printf("⚠️ This action CANNOT be undone.\n\n")
|
||||
fmt.Printf("Are you sure you want to continue? (type 'yes' to confirm): ")
|
||||
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
scanner.Scan()
|
||||
response := strings.TrimSpace(strings.ToLower(scanner.Text()))
|
||||
|
||||
if response != "yes" {
|
||||
fmt.Printf("Cleanup cancelled.\n")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Use native Go TPM library to undefine the NV space
|
||||
logger.Debugf("Using native TPM library to undefine NV index")
|
||||
fmt.Printf("Cleaning up TPM NV index %s...\n", targetIndex)
|
||||
|
||||
err = tpm.UndefineBlob(opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to undefine NV index %s: %w", targetIndex, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully cleaned up NV index %s\n", targetIndex)
|
||||
logger.Debugf("Successfully undefined NV index %s", targetIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// maskSensitiveString masks certificate paths/content for logging
|
||||
func maskSensitiveString(s string) string {
|
||||
if s == "" {
|
||||
return "<empty>"
|
||||
}
|
||||
if len(s) <= 10 {
|
||||
return strings.Repeat("*", len(s))
|
||||
}
|
||||
// Show first 3 and last 3 characters with * in between
|
||||
return s[:3] + strings.Repeat("*", len(s)-6) + s[len(s)-3:]
|
||||
}
|
||||
|
@@ -37,6 +37,40 @@ spec:
|
||||
properties:
|
||||
TPMHash:
|
||||
type: string
|
||||
attestation:
|
||||
description: AttestationSpec defines TPM attestation data for TOFU
|
||||
enrollment and verification
|
||||
properties:
|
||||
akPublicKey:
|
||||
description: AKPublicKey stores the Attestation Key public key
|
||||
in PEM format
|
||||
type: string
|
||||
ekPublicKey:
|
||||
description: EKPublicKey stores the Endorsement Key public key
|
||||
in PEM format
|
||||
type: string
|
||||
enrolledAt:
|
||||
description: EnrolledAt timestamp when this TPM was first enrolled
|
||||
format: date-time
|
||||
type: string
|
||||
lastVerifiedAt:
|
||||
description: LastVerifiedAt timestamp of the last successful attestation
|
||||
format: date-time
|
||||
type: string
|
||||
pcrValues:
|
||||
description: PCRValues stores the expected PCR values for boot
|
||||
state verification
|
||||
properties:
|
||||
pcrs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: 'PCRs is a flexible map of PCR index (as string)
|
||||
to PCR value (hex-encoded) Example: {"0": "a1b2c3...", "7":
|
||||
"d4e5f6...", "11": "g7h8i9..."} This allows for any combination
|
||||
of PCRs without hardcoding specific indices'
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
partitions:
|
||||
items:
|
||||
description: 'PartitionSpec defines a Partition. A partition can
|
||||
|
@@ -25,11 +25,6 @@ bases:
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
#- manager_config_patch.yaml
|
||||
|
@@ -1,39 +0,0 @@
|
||||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: manager
|
||||
args:
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
@@ -25,10 +25,6 @@ bases:
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
- pull.yaml
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
|
@@ -1,39 +0,0 @@
|
||||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: manager
|
||||
args:
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
@@ -9,4 +9,6 @@ spec:
|
||||
containers:
|
||||
- name: manager
|
||||
imagePullPolicy: IfNotPresent
|
||||
- name: kube-rbac-proxy
|
||||
imagePullPolicy: IfNotPresent
|
||||
|
||||
|
@@ -34,10 +34,41 @@ spec:
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- command:
|
||||
- /manager
|
||||
args:
|
||||
- --leader-elect
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
||||
- "--namespace=$(POD_NAMESPACE)"
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: controller:latest
|
||||
name: manager
|
||||
securityContext:
|
||||
@@ -82,4 +113,4 @@ spec:
|
||||
- name: wss
|
||||
port: 8082
|
||||
protocol: TCP
|
||||
targetPort: wss
|
||||
targetPort: wss
|
||||
|
@@ -20,14 +20,13 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
@@ -44,10 +43,7 @@ var testEnv *envtest.Environment
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"Controller Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
RunSpecs(t, "Control")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
@@ -73,8 +69,7 @@ var _ = BeforeSuite(func() {
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(k8sClient).NotTo(BeNil())
|
||||
|
||||
}, 60)
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
|
@@ -1,3 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.6.21 --allow-privileged $@
|
||||
docker run --privileged -v /var/run/docker.sock:/var/run/docker.sock --rm -t -v $(pwd):/workspace -v earthly-tmp:/tmp/earthly:rw earthly/earthly:v0.8.16 --allow-privileged $@
|
64
examples/cli-usage.sh
Executable file
64
examples/cli-usage.sh
Executable file
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Example script demonstrating the new CLI interface for kcrypt-challenger
|
||||
# This makes testing and debugging much easier than using the plugin interface
|
||||
|
||||
echo "=== kcrypt-challenger CLI Examples ==="
|
||||
echo
|
||||
|
||||
# Build the binary if it doesn't exist
|
||||
if [ ! -f "./kcrypt-discovery-challenger" ]; then
|
||||
echo "Building kcrypt-discovery-challenger..."
|
||||
go build -o kcrypt-discovery-challenger ./cmd/discovery/
|
||||
echo
|
||||
fi
|
||||
|
||||
echo "1. Show help:"
|
||||
./kcrypt-discovery-challenger --help
|
||||
echo
|
||||
|
||||
echo "2. Show version:"
|
||||
./kcrypt-discovery-challenger --version
|
||||
echo
|
||||
|
||||
echo "3. Test CLI mode with example parameters (will fail without server, but shows the flow):"
|
||||
echo " Command: ./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --attempts=1"
|
||||
echo " Expected: Error connecting to server, but flow detection should work"
|
||||
echo
|
||||
./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --attempts=1 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "4. Test CLI mode with configuration overrides:"
|
||||
echo " Command: ./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --challenger-server=https://custom-server.com:8082 --mdns=true --attempts=1"
|
||||
echo " Expected: Same error but with custom server configuration"
|
||||
echo
|
||||
./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --challenger-server=https://custom-server.com:8082 --mdns=true --attempts=1 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "4. Check the log file for flow detection:"
|
||||
if [ -f "/tmp/kcrypt-challenger-client.log" ]; then
|
||||
echo " Log contents:"
|
||||
cat /tmp/kcrypt-challenger-client.log
|
||||
echo
|
||||
else
|
||||
echo " No log file found"
|
||||
fi
|
||||
|
||||
echo "5. Test plugin mode (for comparison):"
|
||||
echo " Command: echo '{\"data\": \"{\\\"name\\\": \\\"/dev/sda2\\\", \\\"uuid\\\": \\\"12345-abcde\\\", \\\"filesystemLabel\\\": \\\"encrypted-data\\\"}\"}' | ./kcrypt-discovery-challenger discovery.password"
|
||||
echo " Expected: Same behavior as CLI mode"
|
||||
echo
|
||||
echo '{"data": "{\"name\": \"/dev/sda2\", \"uuid\": \"12345-abcde\", \"filesystemLabel\": \"encrypted-data\"}"}' | ./kcrypt-discovery-challenger discovery.password 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "=== Summary ==="
|
||||
echo "✅ CLI interface successfully created"
|
||||
echo "✅ Full compatibility with plugin mode maintained"
|
||||
echo "✅ Same backend logic used for both interfaces"
|
||||
echo "✅ Flow detection works in both modes"
|
||||
echo ""
|
||||
echo "Benefits:"
|
||||
echo "- Much easier testing during development"
|
||||
echo "- Can be used for debugging in production"
|
||||
echo "- Clear command-line interface with help and examples"
|
||||
echo "- Maintains full compatibility with kcrypt integration"
|
249
go.mod
249
go.mod
@@ -1,134 +1,189 @@
|
||||
module github.com/kairos-io/kairos-challenger
|
||||
|
||||
go 1.18
|
||||
go 1.25
|
||||
|
||||
replace github.com/kairos-io/tpm-helpers => github.com/kairos-io/tpm-helpers v0.0.0-20250924104130-49f51e390ef3
|
||||
|
||||
//replace github.com/kairos-io/tpm-helpers => /home/dimitris/workspace/kairos/tpm-helpers
|
||||
|
||||
require (
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/jaypipes/ghw v0.9.0
|
||||
github.com/kairos-io/kairos v1.24.3-56.0.20230118103822-e3dbd41dddd1
|
||||
github.com/kairos-io/kcrypt v0.4.5-0.20230118125949-27183fbce7ea
|
||||
github.com/kairos-io/tpm-helpers v0.0.0-20230119140150-3fa97128ef6b
|
||||
github.com/mudler/go-pluggable v0.0.0-20220716112424-189d463e3ff3
|
||||
github.com/mudler/yip v0.11.4
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/ginkgo/v2 v2.7.0
|
||||
github.com/onsi/gomega v1.25.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
sigs.k8s.io/controller-runtime v0.12.2
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/google/go-attestation v0.5.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hashicorp/mdns v1.0.6
|
||||
github.com/jaypipes/ghw v0.19.1
|
||||
github.com/kairos-io/kairos-sdk v0.10.1
|
||||
github.com/kairos-io/tpm-helpers v0.0.0-20240123063624-f7a3fcc66199
|
||||
github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5
|
||||
github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82
|
||||
github.com/mudler/yip v1.18.0
|
||||
github.com/onsi/ginkgo/v2 v2.25.3
|
||||
github.com/onsi/gomega v1.38.2
|
||||
github.com/spectrocloud/peg v0.0.0-20240405075800-c5da7125e30f
|
||||
github.com/spf13/cobra v1.10.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.27.2
|
||||
k8s.io/apimachinery v0.27.4
|
||||
k8s.io/client-go v0.27.2
|
||||
sigs.k8s.io/controller-runtime v0.15.0
|
||||
)
|
||||
|
||||
require (
|
||||
atomicgo.dev/cursor v0.1.1 // indirect
|
||||
atomicgo.dev/cursor v0.2.0 // indirect
|
||||
atomicgo.dev/keyboard v0.2.9 // indirect
|
||||
cloud.google.com/go v0.93.3 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
atomicgo.dev/schedule v0.1.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.9 // indirect
|
||||
github.com/avast/retry-go v3.0.0+incompatible // indirect
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/bramvdbogaerde/go-scp v1.2.1 // indirect
|
||||
github.com/cavaliergopher/grab/v3 v3.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/codingsince1985/checksum v1.2.6 // indirect
|
||||
github.com/containerd/cgroups/v3 v3.0.5 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/containerd v1.7.27 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.2.3 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/denisbrodbeck/machineid v1.0.1 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/folbricht/tpmk v0.1.2-0.20230104073416-f20b20c289d7 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/zapr v1.2.0 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v28.2.2+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v28.3.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.4 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/certificate-transparency-go v1.1.4 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-tpm v0.3.3 // indirect
|
||||
github.com/google/go-tpm-tools v0.3.10 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-configfs-tsm v0.3.3 // indirect
|
||||
github.com/google/go-containerregistry v0.20.6 // indirect
|
||||
github.com/google/go-tpm v0.9.1 // indirect
|
||||
github.com/google/go-tpm-tools v0.4.4 // indirect
|
||||
github.com/google/go-tspi v0.3.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gookit/color v1.5.2 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/itchyny/gojq v0.12.11 // indirect
|
||||
github.com/itchyny/timefmt-go v0.1.5 // indirect
|
||||
github.com/joho/godotenv v1.4.0 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/ipfs/go-log v1.0.5 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.5.1 // indirect
|
||||
github.com/itchyny/gojq v0.12.17 // indirect
|
||||
github.com/itchyny/timefmt-go v0.1.6 // indirect
|
||||
github.com/jaypipes/pcidb v1.1.1 // indirect
|
||||
github.com/joho/godotenv v1.5.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.5 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/lithammer/fuzzysearch v1.1.8 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/miekg/dns v1.1.55 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/prometheus/client_golang v1.13.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/pterm/pterm v0.12.53 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.20.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/pterm/pterm v0.12.80 // indirect
|
||||
github.com/qeesung/image2ascii v1.0.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.3 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/twpayne/go-vfs v1.7.2 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rs/zerolog v1.33.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.24.7 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twpayne/go-vfs/v4 v4.3.0 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/wayneashleyberry/terminal-dimensions v1.1.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/net v0.5.0 // indirect
|
||||
golang.org/x/oauth2 v0.4.0 // indirect
|
||||
golang.org/x/sys v0.4.0 // indirect
|
||||
golang.org/x/term v0.4.0 // indirect
|
||||
golang.org/x/text v0.6.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/mod v0.28.0 // indirect
|
||||
golang.org/x/net v0.44.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.37.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250212204824-5a70512c5d8b // indirect
|
||||
google.golang.org/grpc v1.70.0 // indirect
|
||||
google.golang.org/protobuf v1.36.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
howett.net/plist v1.0.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.24.2 // indirect
|
||||
k8s.io/component-base v0.24.2 // indirect
|
||||
k8s.io/klog/v2 v2.80.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
howett.net/plist v1.0.2-0.20250314012144-ee69052608d9 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.27.2 // indirect
|
||||
k8s.io/component-base v0.27.2 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
5
main.go
5
main.go
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
||||
// to ensure that exec-entrypoint and run can make use of them.
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
|
||||
@@ -120,7 +121,9 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
go challenger.Start(context.Background(), clientset, reconciler, namespace, challengerAddr)
|
||||
serverLog := ctrl.Log.WithName("server")
|
||||
|
||||
go challenger.Start(context.Background(), serverLog, clientset, reconciler, namespace, challengerAddr)
|
||||
|
||||
setupLog.Info("starting manager")
|
||||
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
||||
|
103
mdns-notes.md
Normal file
103
mdns-notes.md
Normal file
@@ -0,0 +1,103 @@
|
||||
# Prerequisites
|
||||
|
||||
Nodes and KMS should be on the same local network (mdns requirement)
|
||||
|
||||
# Steps
|
||||
|
||||
- Create a cluster with a port bound to the host:
|
||||
|
||||
```
|
||||
k3d cluster create kcrypt -p '30000:30000@server:0'
|
||||
```
|
||||
|
||||
(we are going to assign this port to the kcrypt challenger server and advertise it over mdns)
|
||||
|
||||
- Follow [the instructions to setup the kcrypt challenger server](https://github.com/kairos-io/kcrypt-challenger#installation):
|
||||
|
||||
```
|
||||
helm repo add kairos https://kairos-io.github.io/helm-charts
|
||||
helm install kairos-crd kairos/kairos-crds
|
||||
```
|
||||
|
||||
Create the following 'kcrypt-challenger-values.yaml` file:
|
||||
|
||||
|
||||
```yaml
|
||||
service:
|
||||
challenger:
|
||||
type: "NodePort"
|
||||
port: 8082
|
||||
nodePort: 30000
|
||||
```
|
||||
|
||||
and deploy the challenger server with it:
|
||||
|
||||
```bash
|
||||
helm install -f kcrypt-challenger-values.yaml kairos-challenger kairos/kairos-challenger
|
||||
```
|
||||
|
||||
- Add the sealedvolume and secret for the tpm chip:
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: example-host-tpm-secret
|
||||
namespace: default
|
||||
type: Opaque
|
||||
stringData:
|
||||
pass: "awesome-passphrase"
|
||||
---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: example-host
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "5640e37f4016da16b841a93880dcc44886904392fa3c86681087b77db5afedbe"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
secret:
|
||||
name: example-host-tpm-secret
|
||||
path: pass
|
||||
quarantined: false
|
||||
```
|
||||
|
||||
- Start the [simple-mdns-server](https://github.com/kairos-io/simple-mdns-server)
|
||||
|
||||
```
|
||||
go run . --port 30000 --interfaceName enp121s0 --serviceType _kcrypt._tcp --hostName mychallenger.local
|
||||
```
|
||||
|
||||
|
||||
- Start a node in manual install mode
|
||||
|
||||
- Replace `/system/discovery/kcrypt-discovery-challenger` with a custom build (until we merge)
|
||||
|
||||
- Create the following config:
|
||||
|
||||
```
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
|
||||
# Kcrypt configuration block
|
||||
kcrypt:
|
||||
challenger:
|
||||
mdns: true
|
||||
challenger_server: "http://mychallenger.local"
|
||||
```
|
||||
|
||||
- Install:
|
||||
|
||||
```
|
||||
kairos-agent manual-install --device auto config.yaml
|
||||
```
|
File diff suppressed because it is too large
Load Diff
@@ -5,6 +5,11 @@
|
||||
package challenger
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-attestation/attest"
|
||||
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -38,7 +43,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -67,7 +72,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("doesn't match a request with an empty field", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).To(BeNil())
|
||||
})
|
||||
})
|
||||
@@ -86,7 +91,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -108,7 +113,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -130,11 +135,473 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns nil sealedVolumeData", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).To(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Selective Enrollment Mode", func() {
|
||||
var logger logr.Logger
|
||||
|
||||
BeforeEach(func() {
|
||||
logger = logr.Discard()
|
||||
})
|
||||
|
||||
Describe("verifyAKMatch with selective enrollment", func() {
|
||||
var currentAK *attest.AttestationParameters
|
||||
var expectedAKPEM string
|
||||
const mockAKPublicKey = "mock-ak-public-key"
|
||||
|
||||
BeforeEach(func() {
|
||||
// Mock current AK parameters - in real implementation this would come from TPM
|
||||
currentAK = &attest.AttestationParameters{
|
||||
Public: []byte(mockAKPublicKey),
|
||||
UseTCSDActivationFormat: false,
|
||||
CreateData: []byte("mock-create-data"),
|
||||
CreateAttestation: []byte("mock-create-attestation"),
|
||||
CreateSignature: []byte("mock-create-signature"),
|
||||
}
|
||||
|
||||
// Generate the expected PEM encoding from the plain text constant
|
||||
var err error
|
||||
expectedAKPEM, err = encodeAKToPEM(currentAK)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
When("stored AK is empty (re-enrollment mode)", func() {
|
||||
It("should store the current AK value during re-enrollment", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Empty = re-enrollment mode
|
||||
}
|
||||
|
||||
// Before re-enrollment: AK should be empty
|
||||
Expect(attestation.AKPublicKey).To(Equal(""))
|
||||
|
||||
// Re-enrollment should store the current AK
|
||||
err := updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// After re-enrollment: AK should contain the exact expected PEM value
|
||||
Expect(attestation.AKPublicKey).To(Equal(expectedAKPEM))
|
||||
})
|
||||
|
||||
It("should accept any AK, store it during re-enrollment, then enforce exact match", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Start in re-enrollment mode
|
||||
}
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
|
||||
// Step 1: Verification should pass with any AK (re-enrollment mode)
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 2: Re-enroll - store the AK
|
||||
err = updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 3: Now we should be in enforcement mode - same AK should pass
|
||||
err = verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Different AK should now fail (enforcement mode)
|
||||
differentAK := &attest.AttestationParameters{
|
||||
Public: []byte("different-ak-key"),
|
||||
}
|
||||
err = verifyAKMatchSelective(sealedVolume, differentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
When("stored AK is set (enforcement mode)", func() {
|
||||
It("should enforce exact match", func() {
|
||||
// Create a specific AK PEM that won't match our mock
|
||||
storedAKPEM := "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtest\n-----END PUBLIC KEY-----"
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: storedAKPEM,
|
||||
}
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
When("no attestation data exists", func() {
|
||||
It("should return error", func() {
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: nil,
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("no attestation data"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("verifyPCRValuesSelective", func() {
|
||||
var currentPCRs *keyserverv1alpha1.PCRValues
|
||||
const expectedPCR0 = "abc123def456"
|
||||
const expectedPCR7 = "ghi789jkl012"
|
||||
const expectedPCR11 = "mno345pqr678"
|
||||
|
||||
BeforeEach(func() {
|
||||
currentPCRs = &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": expectedPCR0,
|
||||
"7": expectedPCR7,
|
||||
"11": expectedPCR11,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
When("stored PCR values are empty (re-enrollment mode)", func() {
|
||||
It("should accept any PCR values during verification", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "", // Empty = re-enrollment mode
|
||||
"11": "", // Empty = re-enrollment mode
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
It("should store the current PCR values during re-enrollment", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
PCRValues: &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "", // Empty = re-enrollment mode
|
||||
"11": "", // Empty = re-enrollment mode
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Before re-enrollment: PCRs should be empty
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal(""))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal(""))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal(""))
|
||||
|
||||
// Re-enrollment should store the current PCR values
|
||||
err := updateAttestationDataSelective(attestation, nil, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// After re-enrollment: PCRs should be stored with exact expected values
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal(expectedPCR0))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal(expectedPCR7))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal(expectedPCR11))
|
||||
})
|
||||
|
||||
It("should transition from re-enrollment mode to enforcement mode", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Start in re-enrollment mode
|
||||
},
|
||||
}
|
||||
|
||||
// Create a limited current PCR set (only PCR0) to test selective enrollment
|
||||
limitedCurrentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": expectedPCR0, // Only provide PCR0
|
||||
},
|
||||
}
|
||||
|
||||
// Step 1: Should accept any PCR values (re-enrollment mode)
|
||||
err := verifyPCRValuesSelective(storedPCRs, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 2: Re-enroll - store the PCR value (should only update the empty PCR0)
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
PCRValues: storedPCRs,
|
||||
}
|
||||
err = updateAttestationDataSelective(attestation, nil, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Verify PCR0 was enrolled and no other PCRs were added
|
||||
Expect(storedPCRs.PCRs["0"]).To(Equal(expectedPCR0))
|
||||
Expect(storedPCRs.PCRs).To(HaveLen(1)) // Should still only have PCR0
|
||||
|
||||
// Step 3: Now should be in enforcement mode - same PCR should pass
|
||||
err = verifyPCRValuesSelective(storedPCRs, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Different PCR should now fail (enforcement mode)
|
||||
differentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "different_value",
|
||||
},
|
||||
}
|
||||
err = verifyPCRValuesSelective(storedPCRs, differentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR0 changed"))
|
||||
})
|
||||
})
|
||||
|
||||
When("stored PCR values are set (enforcement mode)", func() {
|
||||
It("should enforce exact match for set values", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Matches current
|
||||
"7": "different_value", // Different from current
|
||||
"11": "mno345pqr678", // Matches current
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR7 changed"))
|
||||
})
|
||||
|
||||
It("should pass when all set values match", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Matches current
|
||||
"7": "ghi789jkl012", // Matches current
|
||||
"11": "mno345pqr678", // Matches current
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("PCR fields are omitted (skip verification)", func() {
|
||||
It("should skip verification for omitted PCRs entirely", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Present and matches
|
||||
"7": "ghi789jkl012", // Present and matches
|
||||
// "11" is omitted entirely = skip verification
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("mixed selective and enforcement mode", func() {
|
||||
It("should handle combination of empty, set, and omitted PCRs", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "ghi789jkl012", // Set = enforcement mode (matches)
|
||||
"14": "any_value", // Set but PCR14 not in current (should fail)
|
||||
// "11" omitted = skip verification
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR14"))
|
||||
})
|
||||
})
|
||||
|
||||
When("no stored PCR values exist", func() {
|
||||
It("should accept any current PCR values", func() {
|
||||
err := verifyPCRValuesSelective(nil, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("no current PCR values provided", func() {
|
||||
It("should pass if no stored values either", func() {
|
||||
err := verifyPCRValuesSelective(nil, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
It("should fail if stored values expect specific PCRs", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456",
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, nil, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("no current PCR values"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("updateAttestationData for selective enrollment", func() {
|
||||
It("should update empty fields with current values", func() {
|
||||
currentAK := &attest.AttestationParameters{
|
||||
Public: []byte("new-ak-public-key"),
|
||||
}
|
||||
currentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "new_pcr0_value",
|
||||
"7": "new_pcr7_value",
|
||||
"11": "new_pcr11_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Empty = should be updated
|
||||
PCRValues: &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = should be updated
|
||||
"7": "fixed_pcr7_value", // Set = should NOT be updated
|
||||
"11": "", // Empty = should be updated
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := updateAttestationDataSelective(attestation, currentAK, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// AK should be updated
|
||||
Expect(attestation.AKPublicKey).ToNot(BeEmpty())
|
||||
|
||||
// PCR0 should be updated (was empty)
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal("new_pcr0_value"))
|
||||
|
||||
// PCR7 should NOT be updated (was set)
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal("fixed_pcr7_value"))
|
||||
|
||||
// PCR11 should be updated (was empty)
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("new_pcr11_value"))
|
||||
})
|
||||
|
||||
It("should demonstrate AK re-enrollment workflow", func() {
|
||||
// Step 1: Start with empty AK (re-enrollment mode)
|
||||
originalAK := ""
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: originalAK, // Empty = re-enrollment mode
|
||||
}
|
||||
|
||||
// Step 2: Current AK from client
|
||||
currentAK := &attest.AttestationParameters{
|
||||
Public: []byte("client-provided-ak-key"),
|
||||
}
|
||||
|
||||
// Step 3: Verification should pass (empty stored AK accepts any)
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Update should store the new AK (this is the re-enrollment)
|
||||
err = updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 5: Verify the AK was actually enrolled (stored)
|
||||
Expect(attestation.AKPublicKey).ToNot(BeEmpty())
|
||||
Expect(attestation.AKPublicKey).ToNot(Equal(originalAK))
|
||||
|
||||
// Step 6: Future verification should now require exact match
|
||||
err = verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil()) // Should still pass with same AK
|
||||
|
||||
// Step 7: Different AK should now fail (enforcement mode)
|
||||
differentAK := &attest.AttestationParameters{
|
||||
Public: []byte("different-ak-key"),
|
||||
}
|
||||
err = verifyAKMatchSelective(sealedVolume, differentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Initial TOFU Enrollment behavior", func() {
|
||||
It("should store ALL provided PCRs during initial enrollment", func() {
|
||||
clientPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "pcr0_value",
|
||||
"1": "pcr1_value",
|
||||
"2": "pcr2_value",
|
||||
"7": "pcr7_value",
|
||||
"11": "pcr11_value",
|
||||
"14": "pcr14_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := createInitialTOFUAttestation(nil, clientPCRs, logger)
|
||||
|
||||
// All provided PCRs should be stored
|
||||
Expect(attestation.PCRValues).ToNot(BeNil())
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveLen(6))
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal("pcr0_value"))
|
||||
Expect(attestation.PCRValues.PCRs["1"]).To(Equal("pcr1_value"))
|
||||
Expect(attestation.PCRValues.PCRs["2"]).To(Equal("pcr2_value"))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal("pcr7_value"))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("pcr11_value"))
|
||||
Expect(attestation.PCRValues.PCRs["14"]).To(Equal("pcr14_value"))
|
||||
})
|
||||
|
||||
It("should not filter or omit any PCRs during TOFU", func() {
|
||||
// Test that even "sensitive" PCRs like PCR11 are stored
|
||||
clientPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"11": "kernel_pcr_value", // Previously filtered out
|
||||
"12": "other_pcr_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := createInitialTOFUAttestation(nil, clientPCRs, logger)
|
||||
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveKey("11"))
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveKey("12"))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("kernel_pcr_value"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("handleTPMAttestation functions", func() {
|
||||
Describe("establishAttestationConnection", func() {
|
||||
var mockResponseWriter *httptest.ResponseRecorder
|
||||
var mockRequest *http.Request
|
||||
var logger logr.Logger
|
||||
|
||||
BeforeEach(func() {
|
||||
logger = logr.Discard()
|
||||
mockResponseWriter = httptest.NewRecorder()
|
||||
mockRequest = httptest.NewRequest("GET", "/test", nil)
|
||||
|
||||
// Set partition headers
|
||||
mockRequest.Header.Set("label", "COS_PERSISTENT")
|
||||
mockRequest.Header.Set("name", "/dev/sda1")
|
||||
mockRequest.Header.Set("uuid", "test-uuid-123")
|
||||
})
|
||||
|
||||
It("should return error when WebSocket upgrade fails", func() {
|
||||
// This test checks the error behavior when WebSocket upgrade fails
|
||||
conn, partition, err := establishAttestationConnection(mockResponseWriter, mockRequest, logger)
|
||||
|
||||
// WebSocket upgrade should fail with regular HTTP request
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("upgrade"))
|
||||
Expect(conn).To(BeNil())
|
||||
|
||||
// When upgrade fails, partition info is not extracted (function returns early)
|
||||
Expect(partition.Label).To(Equal(""))
|
||||
Expect(partition.DeviceName).To(Equal(""))
|
||||
Expect(partition.UUID).To(Equal(""))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
func volumeListWithPartitionSpec(partitionSpec keyserverv1alpha1.PartitionSpec) *keyserverv1alpha1.SealedVolumeList {
|
||||
@@ -151,3 +618,25 @@ func volumeListWithPartitionSpec(partitionSpec keyserverv1alpha1.PartitionSpec)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func volumeListWithAttestationSpec(tpmHash string, attestation *keyserverv1alpha1.AttestationSpec) *keyserverv1alpha1.SealedVolumeList {
|
||||
return &keyserverv1alpha1.SealedVolumeList{
|
||||
Items: []keyserverv1alpha1.SealedVolume{
|
||||
{Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
TPMHash: tpmHash,
|
||||
Partitions: []keyserverv1alpha1.PartitionSpec{
|
||||
{
|
||||
Label: "COS_PERSISTENT",
|
||||
Secret: &keyserverv1alpha1.SecretSpec{
|
||||
Name: "test-secret",
|
||||
Path: "pass",
|
||||
},
|
||||
},
|
||||
},
|
||||
Quarantined: false,
|
||||
Attestation: attestation,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@@ -2,3 +2,4 @@ package constants
|
||||
|
||||
const TPMSecret = "tpm"
|
||||
const GeneratedByKey = "generated_by"
|
||||
const AKBlobFile = "/etc/kairos/ak.blob"
|
||||
|
44
renovate.json
Normal file
44
renovate.json
Normal file
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"config:base"
|
||||
],
|
||||
"schedule": [
|
||||
"after 11pm every weekday",
|
||||
"before 7am every weekday",
|
||||
"every weekend"
|
||||
],
|
||||
"timezone": "Europe/Brussels",
|
||||
"rebaseWhen": "behind-base-branch",
|
||||
"reviewers": [ "team:maintainers" ],
|
||||
"packageRules": [
|
||||
{
|
||||
"matchUpdateTypes": [
|
||||
"patch"
|
||||
],
|
||||
"automerge": true
|
||||
}
|
||||
],
|
||||
"regexManagers": [
|
||||
{
|
||||
"fileMatch": [
|
||||
"^Earthfile$"
|
||||
],
|
||||
"matchStrings": [
|
||||
"#\\s*renovate:\\s*datasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\sARG\\s+.+_VERSION=(?<currentValue>.*?)\\s"
|
||||
],
|
||||
"versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
|
||||
},
|
||||
{
|
||||
"fileMatch": [
|
||||
"^earthly\\.(sh|ps1)$"
|
||||
],
|
||||
"datasourceTemplate": "docker",
|
||||
"depNameTemplate": "earthly/earthly",
|
||||
"matchStrings": [
|
||||
"earthly\\/earthly:(?<currentValue>.*?)\\s"
|
||||
],
|
||||
"versioningTemplate": "semver-coerced"
|
||||
}
|
||||
]
|
||||
}
|
60
scripts/e2e-tests.sh
Executable file
60
scripts/e2e-tests.sh
Executable file
@@ -0,0 +1,60 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
# This scripts prepares a cluster where we install the kcrypt CRDs.
|
||||
# This is where sealed volumes are created.
|
||||
|
||||
GINKGO_NODES="${GINKGO_NODES:-1}"
|
||||
K3S_IMAGE="rancher/k3s:v1.26.1-k3s1"
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
CLUSTER_NAME=$(echo $RANDOM | md5sum | head -c 10; echo;)
|
||||
export KUBECONFIG=$(mktemp)
|
||||
|
||||
# https://unix.stackexchange.com/a/423052
|
||||
getFreePort() {
|
||||
echo $(comm -23 <(seq "30000" "30200" | sort) <(ss -Htan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n "1")
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
echo "Cleaning up $CLUSTER_NAME"
|
||||
k3d cluster delete "$CLUSTER_NAME" || true
|
||||
rm -rf "$KUBECONFIG"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create a cluster and bind ports 80 and 443 on the host
|
||||
# This will allow us to access challenger server on 10.0.2.2 which is the IP
|
||||
# on which qemu "sees" the host.
|
||||
# We change the CIDR because k3s creates iptables rules that block DNS traffic to this CIDR
|
||||
# (something like that). If you run k3d inside a k3s cluster (inside a Pod), DNS won't work
|
||||
# inside the k3d server container unless you use a different CIDR.
|
||||
# Here we are avoiding CIDR "10.43.x.x"
|
||||
k3d cluster create "$CLUSTER_NAME" --k3s-arg "--cluster-cidr=10.49.0.1/16@server:0" --k3s-arg "--service-cidr=10.48.0.1/16@server:0" -p '80:80@server:0' -p '443:443@server:0' --image "$K3S_IMAGE"
|
||||
k3d kubeconfig get "$CLUSTER_NAME" > "$KUBECONFIG"
|
||||
|
||||
# Import the controller image that we built at the start into to the cluster
|
||||
# this image has to exists and be available in the local docker
|
||||
k3d image import -c "$CLUSTER_NAME" controller:latest
|
||||
|
||||
# Install cert manager
|
||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
|
||||
kubectl wait --for=condition=Available deployment --timeout=2m -n cert-manager --all
|
||||
|
||||
# Replace the CLUSTER_IP in the kustomize resource
|
||||
# Only needed for debugging so that we can access the server from the host
|
||||
# (the 10.0.2.2 IP address is only useful from within qemu)
|
||||
export CLUSTER_IP=$(docker inspect "k3d-${CLUSTER_NAME}-server-0" | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
|
||||
envsubst \
|
||||
< "$SCRIPT_DIR/../tests/assets/challenger-server-ingress.template.yaml" \
|
||||
> "$SCRIPT_DIR/../tests/assets/challenger-server-ingress.yaml"
|
||||
|
||||
# Install the challenger server kustomization
|
||||
kubectl apply -k "$SCRIPT_DIR/../tests/assets/"
|
||||
|
||||
# 10.0.2.2 is where the vm sees the host
|
||||
# https://stackoverflow.com/a/6752280
|
||||
export KMS_ADDRESS="10.0.2.2.challenger.sslip.io"
|
||||
|
||||
go run github.com/onsi/ginkgo/v2/ginkgo -v --nodes $GINKGO_NODES --label-filter $LABEL --fail-fast -r ./tests/
|
37
tests/assets/challenger-patch.yaml
Normal file
37
tests/assets/challenger-patch.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: manager
|
||||
# Don't try to pull the image we built locally
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- "--health-probe-bind-address"
|
||||
- ":8081"
|
||||
- "--metrics-bind-address"
|
||||
- "127.0.0.1:8080"
|
||||
- "--namespace"
|
||||
- "default"
|
||||
- "--leader-elect"
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kcrypt-escrow-server
|
||||
namespace: system
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
control-plane: controller-manager
|
||||
ports:
|
||||
- name: wss
|
||||
port: 8082
|
||||
protocol: TCP
|
||||
targetPort: 8082
|
46
tests/assets/challenger-server-ingress.template.yaml
Normal file
46
tests/assets/challenger-server-ingress.template.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: challenger-server
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: "selfsigned"
|
||||
kubernetes.io/ingress.class: "traefik"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- 10.0.2.2.challenger.sslip.io
|
||||
- ${CLUSTER_IP}.challenger.sslip.io
|
||||
- discoverable-kms.local
|
||||
secretName: kms-tls
|
||||
rules:
|
||||
- host: 10.0.2.2.challenger.sslip.io
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
path: "/"
|
||||
backend:
|
||||
service:
|
||||
name: kcrypt-controller-kcrypt-escrow-server
|
||||
port:
|
||||
number: 8082
|
||||
- host: ${CLUSTER_IP}.challenger.sslip.io
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
path: "/"
|
||||
backend:
|
||||
service:
|
||||
name: kcrypt-controller-kcrypt-escrow-server
|
||||
port:
|
||||
number: 8082
|
||||
- host: discoverable-kms.local
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
path: "/"
|
||||
backend:
|
||||
service:
|
||||
name: kcrypt-controller-kcrypt-escrow-server
|
||||
port:
|
||||
number: 8082
|
8
tests/assets/cluster-issuer.yaml
Normal file
8
tests/assets/cluster-issuer.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
# Self-signed issuer
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: selfsigned
|
||||
spec:
|
||||
selfSigned: {}
|
13
tests/assets/kustomization.yaml
Normal file
13
tests/assets/kustomization.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
# Adds namespace to all resources.
|
||||
namespace: default
|
||||
|
||||
bases:
|
||||
- ../../config/default
|
||||
|
||||
resources:
|
||||
- challenger-server-ingress.yaml
|
||||
- cluster-issuer.yaml
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Fix labels and selectors to make challenger server accessible
|
||||
- challenger-patch.yaml
|
450
tests/encryption_test.go
Normal file
450
tests/encryption_test.go
Normal file
@@ -0,0 +1,450 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/spectrocloud/peg/matcher"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
client "github.com/kairos-io/kairos-challenger/cmd/discovery/client"
|
||||
)
|
||||
|
||||
var installationOutput string
|
||||
var vm VM
|
||||
var mdnsVM VM
|
||||
|
||||
var _ = Describe("kcrypt encryption", Label("encryption-tests"), func() {
|
||||
var config string
|
||||
var vmOpts VMOptions
|
||||
var expectedInstallationSuccess bool
|
||||
|
||||
BeforeEach(func() {
|
||||
expectedInstallationSuccess = true
|
||||
|
||||
vmOpts = DefaultVMOptions()
|
||||
RegisterFailHandler(printInstallationOutput)
|
||||
_, vm = startVM(vmOpts)
|
||||
fmt.Printf("\nvm.StateDir = %+v\n", vm.StateDir)
|
||||
|
||||
vm.EventuallyConnects(1200)
|
||||
})
|
||||
|
||||
JustBeforeEach(func() {
|
||||
configFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(configFile.Name())
|
||||
|
||||
err = os.WriteFile(configFile.Name(), []byte(config), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = vm.Scp(configFile.Name(), "config.yaml", "0744")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
installationOutput, err = vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
if expectedInstallationSuccess {
|
||||
Expect(err).ToNot(HaveOccurred(), installationOutput)
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
vm.GatherLog("/run/immucore/immucore.log")
|
||||
err := vm.Destroy(func(vm VM) {
|
||||
// Stop TPM emulator
|
||||
tpmPID, err := os.ReadFile(path.Join(vm.StateDir, "tpm", "pid"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
if len(tpmPID) != 0 {
|
||||
pid, err := strconv.Atoi(string(tpmPID))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
syscall.Kill(pid, syscall.SIGKILL)
|
||||
}
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
When("discovering KMS with mdns", Label("discoverable-kms"), func() {
|
||||
var tpmHash string
|
||||
var mdnsHostname string
|
||||
|
||||
BeforeEach(func() {
|
||||
By("creating the secret in kubernetes")
|
||||
tpmHash = createTPMPassphraseSecret(vm)
|
||||
|
||||
mdnsHostname = "discoverable-kms.local"
|
||||
|
||||
By("deploying simple-mdns-server vm")
|
||||
mdnsVM = deploySimpleMDNSServer(mdnsHostname)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
mdns: true
|
||||
challenger_server: "http://%[1]s"
|
||||
`, mdnsHostname)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
|
||||
err = mdnsVM.Destroy(func(vm VM) {})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("discovers the KMS using mdns", func() {
|
||||
Skip("TODO: make this test work")
|
||||
|
||||
By("rebooting")
|
||||
vm.Reboot()
|
||||
By("checking that we can connect after installation")
|
||||
vm.EventuallyConnects(1200)
|
||||
By("checking if we got an encrypted partition")
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
})
|
||||
})
|
||||
|
||||
// https://kairos.io/docs/advanced/partition_encryption/#offline-mode
|
||||
When("doing local encryption", Label("local-encryption"), func() {
|
||||
BeforeEach(func() {
|
||||
config = `#cloud-config
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
`
|
||||
})
|
||||
|
||||
It("boots and has an encrypted partition", func() {
|
||||
vm.Reboot()
|
||||
vm.EventuallyConnects(1200)
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
})
|
||||
})
|
||||
|
||||
//https://kairos.io/docs/advanced/partition_encryption/#online-mode
|
||||
When("using a remote key management server (automated passphrase generation)", Label("remote-auto"), func() {
|
||||
var tpmHash string
|
||||
|
||||
BeforeEach(func() {
|
||||
tpmHash = createTPMPassphraseSecret(vm)
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
nv_index: ""
|
||||
c_index: ""
|
||||
tpm_device: ""
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
})
|
||||
|
||||
It("creates a passphrase and a key/pair to decrypt it", func() {
|
||||
// Expect a LUKS partition
|
||||
vm.Reboot(750)
|
||||
vm.EventuallyConnects(1200)
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
|
||||
// Expect a secret to be created
|
||||
cmd := exec.Command("kubectl", "get", "secrets",
|
||||
fmt.Sprintf("%s-cos-persistent", getSealedVolumeName(tpmHash)),
|
||||
"-o=go-template='{{.data.generated_by|base64decode}}'",
|
||||
)
|
||||
|
||||
secretOut, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(secretOut))
|
||||
Expect(string(secretOut)).To(MatchRegexp("tpm"))
|
||||
})
|
||||
})
|
||||
|
||||
// https://kairos.io/docs/advanced/partition_encryption/#scenario-static-keys
|
||||
When("using a remote key management server (static keys)", Label("remote-static"), func() {
|
||||
var tpmHash string
|
||||
var err error
|
||||
|
||||
BeforeEach(func() {
|
||||
tpmHash, err = vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
|
||||
Expect(err).ToNot(HaveOccurred(), tpmHash)
|
||||
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: %[1]s
|
||||
namespace: default
|
||||
type: Opaque
|
||||
stringData:
|
||||
pass: "awesome-plaintext-passphrase"
|
||||
`, tpmHash))
|
||||
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: %[1]s
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%[1]s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
secret:
|
||||
name: %[1]s
|
||||
path: pass
|
||||
quarantined: false
|
||||
`, tpmHash))
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
|
||||
cmd = exec.Command("kubectl", "delete", "secret", tpmHash)
|
||||
out, err = cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
})
|
||||
|
||||
It("creates uses the existing passphrase to decrypt it", func() {
|
||||
// Expect a LUKS partition
|
||||
vm.Reboot()
|
||||
vm.EventuallyConnects(1200)
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_PERSISTENT\""), out)
|
||||
})
|
||||
})
|
||||
|
||||
When("the certificate is pinned on the configuration", Label("remote-https-pinned"), func() {
|
||||
var tpmHash string
|
||||
|
||||
BeforeEach(func() {
|
||||
tpmHash = createTPMPassphraseSecret(vm)
|
||||
cert := getChallengerServerCert()
|
||||
kcryptConfig := createConfigWithCert(fmt.Sprintf("https://%s", os.Getenv("KMS_ADDRESS")), cert)
|
||||
kcryptConfigBytes, err := yaml.Marshal(kcryptConfig)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
%s
|
||||
|
||||
`, string(kcryptConfigBytes))
|
||||
})
|
||||
|
||||
It("successfully talks to the server", func() {
|
||||
vm.Reboot()
|
||||
vm.EventuallyConnects(1200)
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_PERSISTENT\""), out)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
})
|
||||
})
|
||||
|
||||
When("the no certificate is set in the configuration", Label("remote-https-bad-cert"), func() {
|
||||
var tpmHash string
|
||||
|
||||
BeforeEach(func() {
|
||||
tpmHash = createTPMPassphraseSecret(vm)
|
||||
expectedInstallationSuccess = false
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false # we will reboot manually
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "https://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
})
|
||||
|
||||
It("fails to talk to the server", func() {
|
||||
out, err := vm.Sudo("cat manual-install.txt")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("failed to verify certificate: x509: certificate signed by unknown authority"))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func printInstallationOutput(message string, callerSkip ...int) {
|
||||
fmt.Printf("This is the installation output in case it's useful:\n%s\n", installationOutput)
|
||||
|
||||
// Ensures the correct line numbers are reported
|
||||
Fail(message, callerSkip[0]+1)
|
||||
}
|
||||
|
||||
func getChallengerServerCert() string {
|
||||
cmd := exec.Command(
|
||||
"kubectl", "get", "secret", "-n", "default", "kms-tls",
|
||||
"-o", `go-template={{ index .data "ca.crt" | base64decode }}`)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func createConfigWithCert(server, cert string) client.Config {
|
||||
c := client.Config{}
|
||||
c.Kcrypt.Challenger.Server = server
|
||||
c.Kcrypt.Challenger.Certificate = cert
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func createTPMPassphraseSecret(vm VM) string {
|
||||
tpmHash, err := vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
|
||||
Expect(err).ToNot(HaveOccurred(), tpmHash)
|
||||
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%[1]s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%[1]s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false
|
||||
`, strings.TrimSpace(tpmHash)))
|
||||
|
||||
return tpmHash
|
||||
}
|
||||
|
||||
// We run the simple-mdns-server (https://github.com/kairos-io/simple-mdns-server/)
|
||||
// inside a VM next to the one we test. The server advertises the KMS as running on 10.0.2.2
|
||||
// (the host machine). This is a "hack" and is needed because of how the default
|
||||
// networking in qemu works. We need to be within the same network and that
|
||||
// network is only available withing another VM.
|
||||
// https://wiki.qemu.org/Documentation/Networking
|
||||
func deploySimpleMDNSServer(hostname string) VM {
|
||||
opts := DefaultVMOptions()
|
||||
opts.Memory = "2000"
|
||||
opts.CPUS = "1"
|
||||
opts.EmulateTPM = false
|
||||
_, vm := startVM(opts)
|
||||
vm.EventuallyConnects(1200)
|
||||
|
||||
out, err := vm.Sudo(`curl -s https://api.github.com/repos/kairos-io/simple-mdns-server/releases/latest | jq -r .assets[].browser_download_url | grep $(uname -m) | xargs curl -L -o sms.tar.gz`)
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
out, err = vm.Sudo("tar xvf sms.tar.gz")
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
// Start the simple-mdns-server in the background
|
||||
out, err = vm.Sudo(fmt.Sprintf(
|
||||
"/bin/bash -c './simple-mdns-server --port 80 --address 10.0.2.2 --serviceType _kcrypt._tcp --hostName %s &'", hostname))
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
|
||||
return vm
|
||||
}
|
262
tests/remote_attestation_test.go
Normal file
262
tests/remote_attestation_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/spectrocloud/peg/matcher"
|
||||
)
|
||||
|
||||
// Advanced scenarios that test complex operational workflows,
|
||||
// performance aspects, and edge cases
|
||||
|
||||
var _ = Describe("Remote Attestation E2E Tests", Label("remote-complete-workflow"), func() {
|
||||
var config string
|
||||
var vmOpts VMOptions
|
||||
var expectedInstallationSuccess bool
|
||||
var testVM VM
|
||||
var tpmHash string
|
||||
|
||||
BeforeEach(func() {
|
||||
expectedInstallationSuccess = true
|
||||
vmOpts = DefaultVMOptions()
|
||||
_, testVM = startVM(vmOpts)
|
||||
testVM.EventuallyConnects(1200)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cleanupVM(testVM)
|
||||
// Clean up test resources if tpmHash was set
|
||||
if tpmHash != "" {
|
||||
cleanupTestResources(tpmHash)
|
||||
}
|
||||
})
|
||||
|
||||
installKairosWithConfig := func(config string) {
|
||||
installKairosWithConfigAdvanced(testVM, config, expectedInstallationSuccess)
|
||||
}
|
||||
|
||||
It("should perform TOFU enrollment, quarantine testing, PCR management, AK management, error handling, secret reuse, and multi-partition support", func() {
|
||||
tpmHash = getTPMHash(testVM)
|
||||
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
- COS_OEM
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Verify both partitions are encrypted
|
||||
By("Verifying both partitions are encrypted")
|
||||
out, err := testVM.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"oem\""), out)
|
||||
|
||||
By("Verifying SealedVolume was auto-created with attestation data")
|
||||
Eventually(func() bool {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", sealedVolumeName, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// Check that attestation data was populated (not empty)
|
||||
return strings.Contains(string(out), "attestation:") &&
|
||||
strings.Contains(string(out), "ekPublicKey:") &&
|
||||
strings.Contains(string(out), "akPublicKey:")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
By("Verifying encryption secrets were auto-generated for both partitions")
|
||||
Eventually(func() bool {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
return secretExists(fmt.Sprintf("%s-cos-persistent", sealedVolumeName)) &&
|
||||
secretExists(fmt.Sprintf("%s-cos-oem", sealedVolumeName))
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
By("Testing subsequent authentication with learned attestation data")
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
By("quarantining the TPM")
|
||||
quarantineTPM(tpmHash)
|
||||
|
||||
By("Testing that quarantined TPM is rejected via CLI for both partitions")
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", false)
|
||||
|
||||
By("Testing recovery by unquarantining TPM")
|
||||
unquarantineTPM(tpmHash)
|
||||
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", true)
|
||||
|
||||
// Continue with PCR and AK Management testing
|
||||
By("Testing PCR re-enrollment by setting PCR 0 to wrong value")
|
||||
updateSealedVolumeAttestation(tpmHash, "pcrValues.pcrs.0", "wrong-pcr0-value")
|
||||
|
||||
By("checking that the passphrase retrieval fails with wrong PCR for both partitions")
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", false)
|
||||
|
||||
By("setting PCR 0 to an empty value (re-enrollment mode)")
|
||||
updateSealedVolumeAttestation(tpmHash, "pcrValues.pcrs.0", "")
|
||||
|
||||
By("checking that the passphrase retrieval works after PCR re-enrollment for both partitions")
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", true)
|
||||
|
||||
By("Verifying PCR 0 was re-enrolled with current value")
|
||||
Eventually(func() bool {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", sealedVolumeName, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// PCR 0 should now have a new non-empty value
|
||||
return strings.Contains(string(out), "\"0\":") &&
|
||||
!strings.Contains(string(out), "\"0\": \"\"") &&
|
||||
!strings.Contains(string(out), "\"0\": \"wrong-pcr0-value\"")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Continue with AK Management testing
|
||||
By("Testing AK re-enrollment by setting AK to empty")
|
||||
updateSealedVolumeAttestation(tpmHash, "akPublicKey", "")
|
||||
|
||||
By("Verifying AK was re-enrolled with actual value")
|
||||
var learnedAK, learnedEK string
|
||||
Eventually(func() bool {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", sealedVolumeName, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract learned AK and EK for later enforcement test
|
||||
lines := strings.Split(string(out), "\n")
|
||||
for _, line := range lines {
|
||||
if strings.Contains(line, "akPublicKey:") && !strings.Contains(line, "akPublicKey: \"\"") {
|
||||
parts := strings.Split(line, "akPublicKey:")
|
||||
if len(parts) > 1 {
|
||||
learnedAK = strings.TrimSpace(strings.Trim(parts[1], "\""))
|
||||
}
|
||||
}
|
||||
if strings.Contains(line, "ekPublicKey:") && !strings.Contains(line, "ekPublicKey: \"\"") {
|
||||
parts := strings.Split(line, "ekPublicKey:")
|
||||
if len(parts) > 1 {
|
||||
learnedEK = strings.TrimSpace(strings.Trim(parts[1], "\""))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return learnedAK != "" && learnedEK != ""
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Test AK enforcement by setting wrong AK
|
||||
By("Testing AK enforcement by setting wrong AK value")
|
||||
updateSealedVolumeAttestation(tpmHash, "akPublicKey", "wrong-ak-value")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should fail to retrieve passphrase with wrong AK for both partitions
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", false)
|
||||
|
||||
// Restore correct AK and verify it works via CLI
|
||||
By("Restoring correct AK and verifying authentication works for both partitions")
|
||||
updateSealedVolumeAttestation(tpmHash, "akPublicKey", learnedAK)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should now work with correct AK for both partitions
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", true)
|
||||
|
||||
// Continue with Error Handling testing
|
||||
By("Testing invalid TPM hash rejection")
|
||||
invalidHash := "invalid-tpm-hash-12345"
|
||||
createSealedVolumeWithAttestation(invalidHash, nil)
|
||||
|
||||
// Should fail due to TPM hash mismatch for both partitions (test via CLI, no risky reboot)
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", false)
|
||||
|
||||
// Cleanup invalid SealedVolume
|
||||
deleteSealedVolume(invalidHash)
|
||||
|
||||
// Test with correct TPM hash to verify system still works for both partitions
|
||||
By("Verifying system still works with correct TPM hash for both partitions")
|
||||
// The original SealedVolume should still exist and work
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
expectPassphraseRetrieval(testVM, "COS_OEM", true)
|
||||
|
||||
// Continue with Secret Reuse testing
|
||||
By("Testing secret reuse when SealedVolume is recreated for both partitions")
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
persistentSecretName := fmt.Sprintf("%s-cos-persistent", sealedVolumeName)
|
||||
oemSecretName := fmt.Sprintf("%s-cos-oem", sealedVolumeName)
|
||||
|
||||
// Get secret data for comparison for both partitions
|
||||
cmd := exec.Command("kubectl", "get", "secret", persistentSecretName, "-o", "yaml")
|
||||
originalPersistentSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd = exec.Command("kubectl", "get", "secret", oemSecretName, "-o", "yaml")
|
||||
originalOemSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Delete SealedVolume but keep secrets
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Verify secrets still exist
|
||||
Expect(secretExists(persistentSecretName)).To(BeTrue())
|
||||
Expect(secretExists(oemSecretName)).To(BeTrue())
|
||||
|
||||
// Recreate SealedVolume and verify secret reuse
|
||||
By("Recreating SealedVolume and verifying secret reuse for both partitions")
|
||||
createSealedVolumeWithAttestation(tpmHash, nil)
|
||||
|
||||
// Should reuse existing secrets
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Verify the same secrets are being used
|
||||
cmd = exec.Command("kubectl", "get", "secret", persistentSecretName, "-o", "yaml")
|
||||
newPersistentSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd = exec.Command("kubectl", "get", "secret", oemSecretName, "-o", "yaml")
|
||||
newOemSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The secret data should be identical (reused, not regenerated) for both partitions
|
||||
Expect(string(newPersistentSecretData)).To(Equal(string(originalPersistentSecretData)))
|
||||
Expect(string(newOemSecretData)).To(Equal(string(originalOemSecretData)))
|
||||
})
|
||||
})
|
550
tests/suite_test.go
Normal file
550
tests/suite_test.go
Normal file
@@ -0,0 +1,550 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
process "github.com/mudler/go-processmanager"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/spectrocloud/peg/matcher"
|
||||
machine "github.com/spectrocloud/peg/pkg/machine"
|
||||
"github.com/spectrocloud/peg/pkg/machine/types"
|
||||
)
|
||||
|
||||
func TestE2e(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "kcrypt-challenger e2e test Suite")
|
||||
}
|
||||
|
||||
type VMOptions struct {
|
||||
ISO string
|
||||
User string
|
||||
Password string
|
||||
Memory string
|
||||
CPUS string
|
||||
RunSpicy bool
|
||||
UseKVM bool
|
||||
EmulateTPM bool
|
||||
}
|
||||
|
||||
func DefaultVMOptions() VMOptions {
|
||||
var err error
|
||||
|
||||
memory := os.Getenv("MEMORY")
|
||||
if memory == "" {
|
||||
memory = "2096"
|
||||
}
|
||||
cpus := os.Getenv("CPUS")
|
||||
if cpus == "" {
|
||||
cpus = "2"
|
||||
}
|
||||
|
||||
runSpicy := false
|
||||
if s := os.Getenv("MACHINE_SPICY"); s != "" {
|
||||
runSpicy, err = strconv.ParseBool(os.Getenv("MACHINE_SPICY"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
useKVM := false
|
||||
if envKVM := os.Getenv("KVM"); envKVM != "" {
|
||||
useKVM, err = strconv.ParseBool(os.Getenv("KVM"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
return VMOptions{
|
||||
ISO: os.Getenv("ISO"),
|
||||
User: user(),
|
||||
Password: pass(),
|
||||
Memory: memory,
|
||||
CPUS: cpus,
|
||||
RunSpicy: runSpicy,
|
||||
UseKVM: useKVM,
|
||||
EmulateTPM: true,
|
||||
}
|
||||
}
|
||||
|
||||
func user() string {
|
||||
user := os.Getenv("SSH_USER")
|
||||
if user == "" {
|
||||
user = "kairos"
|
||||
}
|
||||
return user
|
||||
}
|
||||
|
||||
func pass() string {
|
||||
pass := os.Getenv("SSH_PASS")
|
||||
if pass == "" {
|
||||
pass = "kairos"
|
||||
}
|
||||
|
||||
return pass
|
||||
}
|
||||
|
||||
func startVM(vmOpts VMOptions) (context.Context, VM) {
|
||||
if vmOpts.ISO == "" {
|
||||
fmt.Println("ISO missing")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
vmName := uuid.New().String()
|
||||
|
||||
stateDir, err := os.MkdirTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
if vmOpts.EmulateTPM {
|
||||
emulateTPM(stateDir)
|
||||
}
|
||||
|
||||
sshPort, err := getFreePort()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
opts := []types.MachineOption{
|
||||
types.QEMUEngine,
|
||||
types.WithISO(vmOpts.ISO),
|
||||
types.WithMemory(vmOpts.Memory),
|
||||
types.WithCPU(vmOpts.CPUS),
|
||||
types.WithSSHPort(strconv.Itoa(sshPort)),
|
||||
types.WithID(vmName),
|
||||
types.WithSSHUser(vmOpts.User),
|
||||
types.WithSSHPass(vmOpts.Password),
|
||||
types.OnFailure(func(p *process.Process) {
|
||||
defer GinkgoRecover()
|
||||
|
||||
var stdout, stderr, serial, status string
|
||||
|
||||
if stdoutBytes, err := os.ReadFile(p.StdoutPath()); err != nil {
|
||||
stdout = fmt.Sprintf("Error reading stdout file: %s\n", err)
|
||||
} else {
|
||||
stdout = string(stdoutBytes)
|
||||
}
|
||||
|
||||
if stderrBytes, err := os.ReadFile(p.StderrPath()); err != nil {
|
||||
stderr = fmt.Sprintf("Error reading stderr file: %s\n", err)
|
||||
} else {
|
||||
stderr = string(stderrBytes)
|
||||
}
|
||||
|
||||
if status, err = p.ExitCode(); err != nil {
|
||||
status = fmt.Sprintf("Error reading exit code file: %s\n", err)
|
||||
}
|
||||
|
||||
if serialBytes, err := os.ReadFile(path.Join(p.StateDir(), "serial.log")); err != nil {
|
||||
serial = fmt.Sprintf("Error reading serial log file: %s\n", err)
|
||||
} else {
|
||||
serial = string(serialBytes)
|
||||
}
|
||||
|
||||
Fail(fmt.Sprintf("\nVM Aborted.\nstdout: %s\nstderr: %s\nserial: %s\nExit status: %s\n",
|
||||
stdout, stderr, serial, status))
|
||||
}),
|
||||
types.WithStateDir(stateDir),
|
||||
// Serial output to file: https://superuser.com/a/1412150
|
||||
func(m *types.MachineConfig) error {
|
||||
if vmOpts.EmulateTPM {
|
||||
m.Args = append(m.Args,
|
||||
"-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s/swtpm-sock", path.Join(stateDir, "tpm")),
|
||||
"-tpmdev", "emulator,id=tpm0,chardev=chrtpm", "-device", "tpm-tis,tpmdev=tpm0")
|
||||
}
|
||||
m.Args = append(m.Args,
|
||||
"-chardev", fmt.Sprintf("stdio,mux=on,id=char0,logfile=%s,signal=off", path.Join(stateDir, "serial.log")),
|
||||
"-serial", "chardev:char0",
|
||||
"-mon", "chardev=char0",
|
||||
)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Set this to true to debug.
|
||||
// You can connect to it with "spicy" or other tool.
|
||||
var spicePort int
|
||||
if vmOpts.RunSpicy {
|
||||
spicePort, err = getFreePort()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
fmt.Printf("Spice port = %d\n", spicePort)
|
||||
opts = append(opts, types.WithDisplay(fmt.Sprintf("-spice port=%d,addr=127.0.0.1,disable-ticketing", spicePort)))
|
||||
}
|
||||
|
||||
if vmOpts.UseKVM {
|
||||
opts = append(opts, func(m *types.MachineConfig) error {
|
||||
m.Args = append(m.Args,
|
||||
"-enable-kvm",
|
||||
)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
m, err := machine.New(opts...)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
vm := NewVM(m, stateDir)
|
||||
|
||||
ctx, err := vm.Start(context.Background())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
if vmOpts.RunSpicy {
|
||||
cmd := exec.Command("spicy",
|
||||
"-h", "127.0.0.1",
|
||||
"-p", strconv.Itoa(spicePort))
|
||||
err = cmd.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
return ctx, vm
|
||||
}
|
||||
|
||||
// return the PID of the swtpm (to be killed later) and the state directory
|
||||
func emulateTPM(stateDir string) {
|
||||
t := path.Join(stateDir, "tpm")
|
||||
err := os.MkdirAll(t, os.ModePerm)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd := exec.Command("swtpm",
|
||||
"socket",
|
||||
"--tpmstate", fmt.Sprintf("dir=%s", t),
|
||||
"--ctrl", fmt.Sprintf("type=unixio,path=%s/swtpm-sock", t),
|
||||
"--tpm2", "--log", "level=20")
|
||||
err = cmd.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = os.WriteFile(path.Join(t, "pid"), []byte(strconv.Itoa(cmd.Process.Pid)), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// https://gist.github.com/sevkin/96bdae9274465b2d09191384f86ef39d
|
||||
// GetFreePort asks the kernel for a free open port that is ready to use.
|
||||
func getFreePort() (port int, err error) {
|
||||
var a *net.TCPAddr
|
||||
if a, err = net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
|
||||
var l *net.TCPListener
|
||||
if l, err = net.ListenTCP("tcp", a); err == nil {
|
||||
defer l.Close()
|
||||
return l.Addr().(*net.TCPAddr).Port, nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// Common Test Helper Functions
|
||||
// ========================================
|
||||
|
||||
// Helper to install Kairos with given config
|
||||
func installKairosWithConfig(vm VM, config string) {
|
||||
configFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(configFile.Name())
|
||||
|
||||
err = os.WriteFile(configFile.Name(), []byte(config), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = vm.Scp(configFile.Name(), "config.yaml", "0744")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
By("Installing Kairos with config")
|
||||
installationOutput, err := vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
Expect(err).ToNot(HaveOccurred(), installationOutput)
|
||||
}
|
||||
|
||||
// Helper to reboot and wait for connection
|
||||
func rebootAndConnect(vm VM) {
|
||||
By("Rebooting VM")
|
||||
vm.Reboot()
|
||||
By("Waiting for VM to be connectable")
|
||||
vm.EventuallyConnects(1200)
|
||||
}
|
||||
|
||||
// Helper to verify encrypted partition exists
|
||||
func verifyEncryptedPartition(vm VM) {
|
||||
By("Verifying encrypted partition exists")
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_PERSISTENT\""), out)
|
||||
}
|
||||
|
||||
// Helper to get TPM hash from VM
|
||||
func getTPMHash(vm VM) string {
|
||||
By("Getting TPM hash from VM")
|
||||
hash, err := vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
|
||||
Expect(err).ToNot(HaveOccurred(), hash)
|
||||
return strings.TrimSpace(hash)
|
||||
}
|
||||
|
||||
// Helper to test passphrase retrieval via CLI (returns true if successful, false if failed)
|
||||
func checkPassphraseRetrieval(vm VM, partitionLabel string) bool {
|
||||
By(fmt.Sprintf("Testing passphrase retrieval for partition %s via CLI", partitionLabel))
|
||||
|
||||
// Configure the CLI to use the challenger server
|
||||
cliCmd := fmt.Sprintf(`/system/discovery/kcrypt-discovery-challenger get \
|
||||
--partition-label=%s \
|
||||
--challenger-server="http://%s" \
|
||||
2>/dev/null`, partitionLabel, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
out, err := vm.Sudo(cliCmd)
|
||||
if err != nil {
|
||||
By(fmt.Sprintf("Passphrase retrieval failed: %v", err))
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if we got a passphrase (non-empty output)
|
||||
passphrase := strings.TrimSpace(out)
|
||||
success := len(passphrase) > 0
|
||||
|
||||
if success {
|
||||
By("Passphrase retrieval successful")
|
||||
} else {
|
||||
By("Passphrase retrieval failed - empty response")
|
||||
}
|
||||
|
||||
return success
|
||||
}
|
||||
|
||||
// Helper to test passphrase retrieval with expectation (for cleaner test logic)
|
||||
func expectPassphraseRetrieval(vm VM, partitionLabel string, shouldSucceed bool) {
|
||||
success := checkPassphraseRetrieval(vm, partitionLabel)
|
||||
if shouldSucceed {
|
||||
Expect(success).To(BeTrue(), "Passphrase retrieval should have succeeded")
|
||||
} else {
|
||||
Expect(success).To(BeFalse(), "Passphrase retrieval should have failed")
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to get the correct SealedVolume name from TPM hash
|
||||
func getSealedVolumeName(tpmHash string) string {
|
||||
// Convert to lowercase and take first 8 characters to match the actual naming pattern
|
||||
// This matches the pattern used in pkg/challenger/challenger.go: fmt.Sprintf("tofu-%s", tpmHash[:8])
|
||||
return fmt.Sprintf("tofu-%s", strings.ToLower(tpmHash[:8]))
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume with specific attestation configuration
|
||||
func createSealedVolumeWithAttestation(tpmHash string, attestationConfig map[string]interface{}) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, sealedVolumeName, tpmHash)
|
||||
|
||||
if attestationConfig != nil {
|
||||
sealedVolumeYaml += "\n attestation:"
|
||||
for key, value := range attestationConfig {
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
sealedVolumeYaml += fmt.Sprintf("\n %s: \"%s\"", key, v)
|
||||
case map[string]string:
|
||||
sealedVolumeYaml += fmt.Sprintf("\n %s:", key)
|
||||
for k, val := range v {
|
||||
sealedVolumeYaml += "\n pcrs:"
|
||||
sealedVolumeYaml += fmt.Sprintf("\n \"%s\": \"%s\"", k, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating SealedVolume with attestation config: %+v", attestationConfig))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to update SealedVolume attestation configuration
|
||||
func updateSealedVolumeAttestation(tpmHashParam string, field, value string) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHashParam)
|
||||
By(fmt.Sprintf("Updating SealedVolume %s field %s to %s", sealedVolumeName, field, value))
|
||||
patch := fmt.Sprintf(`{"spec":{"attestation":{"%s":"%s"}}}`, field, value)
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", sealedVolumeName, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to quarantine TPM
|
||||
func quarantineTPM(tpmHash string) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHash)
|
||||
By(fmt.Sprintf("Quarantining TPM %s", sealedVolumeName))
|
||||
patch := `{"spec":{"quarantined":true}}`
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", sealedVolumeName, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to unquarantine TPM
|
||||
func unquarantineTPM(tpmHashParam string) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHashParam)
|
||||
By(fmt.Sprintf("Unquarantining TPM %s", sealedVolumeName))
|
||||
patch := `{"spec":{"quarantined":false}}`
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", sealedVolumeName, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to delete SealedVolume
|
||||
func deleteSealedVolume(tpmHashParam string) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHashParam)
|
||||
By(fmt.Sprintf("Deleting SealedVolume %s", sealedVolumeName))
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to delete SealedVolume from all namespaces
|
||||
func deleteSealedVolumeAllNamespaces(tpmHashParam string) {
|
||||
sealedVolumeName := getSealedVolumeName(tpmHashParam)
|
||||
By(fmt.Sprintf("Deleting SealedVolume %s from all namespaces", sealedVolumeName))
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", sealedVolumeName, "--ignore-not-found=true", "--all-namespaces")
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to check if secret exists
|
||||
func secretExists(secretName string) bool {
|
||||
cmd := exec.Command("kubectl", "get", "secret", secretName, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
return err == nil && len(out) > 0 && !strings.Contains(string(out), "NotFound")
|
||||
}
|
||||
|
||||
// Helper to check if secret exists in namespace
|
||||
func secretExistsInNamespace(secretName, namespace string) bool {
|
||||
cmd := exec.Command("kubectl", "get", "secret", secretName, "-n", namespace, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
return err == nil && len(out) > 0 && !strings.Contains(string(out), "NotFound")
|
||||
}
|
||||
|
||||
// Helper to apply YAML to Kubernetes
|
||||
func kubectlApplyYaml(yamlData string) {
|
||||
yamlFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(yamlFile.Name())
|
||||
|
||||
err = os.WriteFile(yamlFile.Name(), []byte(yamlData), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd := exec.Command("kubectl", "apply", "-f", yamlFile.Name())
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume with multi-partition configuration
|
||||
func createMultiPartitionSealedVolume(tpmHash string, partitions []string) {
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:`, tpmHash, tpmHash)
|
||||
|
||||
for _, partition := range partitions {
|
||||
sealedVolumeYaml += fmt.Sprintf(`
|
||||
- label: %s`, partition)
|
||||
}
|
||||
|
||||
sealedVolumeYaml += "\n quarantined: false"
|
||||
|
||||
By(fmt.Sprintf("Creating multi-partition SealedVolume for partitions: %v", partitions))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume in specific namespace
|
||||
func createSealedVolumeInNamespace(tpmHash, namespace string) {
|
||||
// First create the namespace if it doesn't exist with test labels
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: %s
|
||||
labels:
|
||||
test.kcrypt.kairos.io/type: test-namespace
|
||||
test.kcrypt.kairos.io/purpose: kcrypt-challenger-testing`, namespace))
|
||||
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: %s
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, tpmHash, namespace, tpmHash)
|
||||
|
||||
By(fmt.Sprintf("Creating SealedVolume in namespace %s", namespace))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to cleanup test resources
|
||||
func cleanupTestResources(tpmHash string) {
|
||||
if tpmHash != "" {
|
||||
deleteSealedVolumeAllNamespaces(tpmHash)
|
||||
|
||||
// Cleanup associated secrets using labels
|
||||
// This will delete all secrets created by kcrypt-challenger for this TPM hash
|
||||
cmd := exec.Command("kubectl", "delete", "secret",
|
||||
"-l", fmt.Sprintf("kcrypt.kairos.io/tpm-hash=%s", tpmHash),
|
||||
"--ignore-not-found=true", "--all-namespaces")
|
||||
cmd.CombinedOutput()
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to delete specific test namespaces
|
||||
func deleteTestNamespaces(namespaces ...string) {
|
||||
for _, namespace := range namespaces {
|
||||
cmd := exec.Command("kubectl", "delete", "namespace", namespace, "--ignore-not-found=true")
|
||||
cmd.CombinedOutput()
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to install Kairos with config (handles both success and failure cases)
|
||||
func installKairosWithConfigAdvanced(vm VM, config string, expectSuccess bool) {
|
||||
configFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(configFile.Name())
|
||||
|
||||
err = os.WriteFile(configFile.Name(), []byte(config), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = vm.Scp(configFile.Name(), "config.yaml", "0744")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
if expectSuccess {
|
||||
By("Installing Kairos with config")
|
||||
installationOutput, err := vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
Expect(err).ToNot(HaveOccurred(), installationOutput)
|
||||
} else {
|
||||
By("Installing Kairos with config (expecting failure)")
|
||||
vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to cleanup VM and TPM emulator
|
||||
func cleanupVM(vm VM) {
|
||||
By("Cleaning up test VM")
|
||||
err := vm.Destroy(func(vm VM) {
|
||||
// Stop TPM emulator
|
||||
tpmPID, err := os.ReadFile(path.Join(vm.StateDir, "tpm", "pid"))
|
||||
if err == nil && len(tpmPID) != 0 {
|
||||
pid, err := strconv.Atoi(string(tpmPID))
|
||||
if err == nil {
|
||||
syscall.Kill(pid, syscall.SIGKILL)
|
||||
}
|
||||
}
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
Reference in New Issue
Block a user