mirror of
https://github.com/kairos-io/kcrypt-challenger.git
synced 2025-09-25 20:47:09 +00:00
Compare commits
28 Commits
renovate/g
...
2988-remot
Author | SHA1 | Date | |
---|---|---|---|
|
4c036c5912 | ||
|
448a8b9684 | ||
|
ee6ed01b50 | ||
|
f0cadbbe6e | ||
|
62fb8f6cce | ||
|
329fa9212c | ||
|
b123339d19 | ||
|
2439d24e70 | ||
|
fac5dfb32d | ||
|
5fb15c81f6 | ||
|
caedb1ef7f | ||
|
55a0d62231 | ||
|
592426ae43 | ||
|
118189e672 | ||
|
5f2d857097 | ||
|
6ce6db1d84 | ||
|
89b07027cb | ||
|
bd19b91a1b | ||
|
9eeb285826 | ||
|
dc853ab2a4 | ||
|
8383f4b1b0 | ||
|
eba04e1479 | ||
|
db5793d0d1 | ||
|
8ce8651bca | ||
|
b674f911da | ||
|
2ef72d3c0a | ||
|
f943b01c90 | ||
|
80cd276ff3 |
15
.github/workflows/e2e-tests.yml
vendored
15
.github/workflows/e2e-tests.yml
vendored
@@ -55,12 +55,27 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
# Original basic tests
|
||||
- label: "local-encryption"
|
||||
- label: "remote-auto"
|
||||
- label: "remote-static"
|
||||
- label: "remote-https-pinned"
|
||||
- label: "remote-https-bad-cert"
|
||||
- label: "discoverable-kms"
|
||||
# New selective enrollment tests
|
||||
- label: "remote-tofu"
|
||||
- label: "remote-quarantine"
|
||||
- label: "remote-pcr-mgmt"
|
||||
- label: "remote-ak-mgmt"
|
||||
- label: "remote-secret-reuse"
|
||||
- label: "remote-edge-cases"
|
||||
# Advanced operational tests
|
||||
- label: "remote-multi-partition"
|
||||
- label: "remote-namespace-isolation"
|
||||
- label: "remote-network-resilience"
|
||||
- label: "remote-performance"
|
||||
- label: "remote-large-pcr"
|
||||
- label: "remote-cleanup"
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -6,6 +6,7 @@
|
||||
*.dylib
|
||||
bin
|
||||
testbin/*
|
||||
manager
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
475
README.md
475
README.md
@@ -27,7 +27,7 @@ With Kairos you can build immutable, bootable Kubernetes and OS images for your
|
||||
<tr>
|
||||
<th align="center">
|
||||
<img width="640" height="1px">
|
||||
<p>
|
||||
<p>
|
||||
<small>
|
||||
Documentation
|
||||
</small>
|
||||
@@ -35,7 +35,7 @@ Documentation
|
||||
</th>
|
||||
<th align="center">
|
||||
<img width="640" height="1">
|
||||
<p>
|
||||
<p>
|
||||
<small>
|
||||
Contribute
|
||||
</small>
|
||||
@@ -46,12 +46,12 @@ Contribute
|
||||
<td>
|
||||
|
||||
📚 [Getting started with Kairos](https://kairos.io/docs/getting-started) <br> :bulb: [Examples](https://kairos.io/docs/examples) <br> :movie_camera: [Video](https://kairos.io/docs/media/) <br> :open_hands:[Engage with the Community](https://kairos.io/community/)
|
||||
|
||||
|
||||
</td>
|
||||
<td>
|
||||
|
||||
🙌[ CONTRIBUTING.md ]( https://github.com/kairos-io/kairos/blob/master/CONTRIBUTING.md ) <br> :raising_hand: [ GOVERNANCE ]( https://github.com/kairos-io/kairos/blob/master/GOVERNANCE.md ) <br>:construction_worker:[Code of conduct](https://github.com/kairos-io/kairos/blob/master/CODE_OF_CONDUCT.md)
|
||||
|
||||
|
||||
🙌[ CONTRIBUTING.md ]( https://github.com/kairos-io/kairos/blob/master/CONTRIBUTING.md ) <br> :raising_hand: [ GOVERNANCE ]( https://github.com/kairos-io/kairos/blob/master/GOVERNANCE.md ) <br>:construction_worker:[Code of conduct](https://github.com/kairos-io/kairos/blob/master/CODE_OF_CONDUCT.md)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
@@ -59,12 +59,39 @@ Contribute
|
||||
| :exclamation: | This is experimental! |
|
||||
|-|:-|
|
||||
|
||||
This is the Kairos kcrypt-challenger Kubernetes Native Extension.
|
||||
This is the Kairos kcrypt-challenger Kubernetes Native Extension.
|
||||
|
||||
## Usage
|
||||
|
||||
See the documentation in our website: https://kairos.io/docs/advanced/partition_encryption/.
|
||||
|
||||
### TPM NV Memory Cleanup
|
||||
|
||||
⚠️ **DANGER**: This command removes encryption passphrases from TPM memory!
|
||||
⚠️ **If you delete the wrong index, your encrypted disk may become UNBOOTABLE!**
|
||||
|
||||
During development and testing, the kcrypt-challenger may store passphrases in TPM non-volatile (NV) memory. These passphrases persist across reboots and can accumulate over time, taking up space in the TPM.
|
||||
|
||||
To clean up TPM NV memory used by the challenger:
|
||||
|
||||
```bash
|
||||
# Clean up the default NV index (respects config or defaults to 0x1500000)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Clean up a specific NV index
|
||||
kcrypt-discovery-challenger cleanup --nv-index=0x1500001
|
||||
|
||||
# Clean up with specific TPM device
|
||||
kcrypt-discovery-challenger cleanup --tpm-device=/dev/tpmrm0
|
||||
```
|
||||
|
||||
**Safety Features:**
|
||||
- By default, the command shows warnings and prompts for confirmation
|
||||
- You must type "yes" to proceed with deletion
|
||||
- Use `--i-know-what-i-am-doing` flag to skip the prompt (not recommended)
|
||||
|
||||
**Note**: This command uses native Go TPM libraries and requires appropriate permissions to access the TPM device.
|
||||
|
||||
## Installation
|
||||
|
||||
To install, use helm:
|
||||
@@ -73,7 +100,7 @@ To install, use helm:
|
||||
# Adds the kairos repo to helm
|
||||
$ helm repo add kairos https://kairos-io.github.io/helm-charts
|
||||
"kairos" has been added to your repositories
|
||||
$ helm repo update
|
||||
$ helm repo update
|
||||
Hang tight while we grab the latest from your chart repositories...
|
||||
...Successfully got an update from the "kairos" chart repository
|
||||
Update Complete. ⎈Happy Helming!⎈
|
||||
@@ -90,3 +117,435 @@ TEST SUITE: None
|
||||
# Installs challenger
|
||||
$ helm install kairos-challenger kairos/kcrypt-challenger
|
||||
```
|
||||
|
||||
## Remote Attestation Flow
|
||||
|
||||
The kcrypt-challenger implements a secure TPM-based remote attestation flow for disk encryption key management. The following diagram illustrates the complete attestation process:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant TPM as TPM Hardware
|
||||
participant Client as TPM Client<br/>(Kairos Node)
|
||||
participant Challenger as Kcrypt Challenger<br/>(Server)
|
||||
participant K8s as Kubernetes API<br/>(SealedVolume/Secret)
|
||||
|
||||
Note over TPM,Client: Client Boot Process
|
||||
Client->>TPM: Extract EK (Endorsement Key)
|
||||
Client->>TPM: Generate AK (Attestation Key)
|
||||
Client->>TPM: Read PCR Values (Boot State)
|
||||
|
||||
Note over Client,Challenger: 1. Connection Establishment
|
||||
Client->>Challenger: WebSocket connection with partition info<br/>(label, device, UUID)
|
||||
Challenger->>Client: Connection established
|
||||
|
||||
Note over Client,Challenger: 2. TPM Authentication (Challenge-Response)
|
||||
Client->>Challenger: Send EK + AK attestation data
|
||||
Challenger->>Challenger: Decode EK/AK, compute TPM hash
|
||||
Challenger->>Challenger: Generate cryptographic challenge
|
||||
Challenger->>Client: Send challenge (encrypted with EK)
|
||||
Client->>TPM: Decrypt challenge using private EK
|
||||
Client->>TPM: Sign response using private AK
|
||||
Client->>Challenger: Send proof response + PCR quote
|
||||
Challenger->>Challenger: Verify challenge response
|
||||
|
||||
Note over Challenger,K8s: 3. Enrollment Context Determination
|
||||
Challenger->>K8s: List SealedVolumes by TPM hash
|
||||
K8s->>Challenger: Return existing volumes (if any)
|
||||
|
||||
alt New Enrollment (TOFU - Trust On First Use)
|
||||
Note over Challenger,K8s: 4a. Initial TOFU Enrollment
|
||||
Challenger->>Challenger: Skip attestation verification (TOFU)
|
||||
Challenger->>Challenger: Generate secure passphrase
|
||||
Challenger->>K8s: Create/reuse Kubernetes Secret
|
||||
Challenger->>Challenger: Create attestation spec (store ALL PCRs)
|
||||
Challenger->>K8s: Create SealedVolume with attestation data
|
||||
K8s->>Challenger: Confirm resource creation
|
||||
else Existing Enrollment
|
||||
Note over Challenger,K8s: 4b. Selective Verification & Re-enrollment
|
||||
Challenger->>Challenger: Check if TPM is quarantined
|
||||
alt TPM Quarantined
|
||||
Challenger->>Client: Security rejection (access denied)
|
||||
else TPM Not Quarantined
|
||||
Note over Challenger: Selective Attestation Verification
|
||||
Challenger->>Challenger: Verify AK using selective enrollment:<br/>• Empty AK = re-enrollment mode (accept any)<br/>• Set AK = enforcement mode (exact match)
|
||||
Challenger->>Challenger: Verify PCRs using selective enrollment:<br/>• Empty PCR = re-enrollment mode (accept + update)<br/>• Set PCR = enforcement mode (exact match)<br/>• Omitted PCR = skip verification entirely
|
||||
alt Verification Failed
|
||||
Challenger->>Client: Security rejection (attestation failed)
|
||||
else Verification Passed
|
||||
Challenger->>Challenger: Update empty fields with current values
|
||||
Challenger->>K8s: Update SealedVolume (if changes made)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
Note over Challenger,K8s: 5. Passphrase Retrieval & Delivery
|
||||
Challenger->>K8s: Get Kubernetes Secret by name/path
|
||||
K8s->>Challenger: Return encrypted passphrase
|
||||
Challenger->>Client: Send passphrase securely
|
||||
|
||||
Note over TPM,Client: 6. Disk Decryption
|
||||
Client->>Client: Use passphrase to decrypt disk partition
|
||||
Client->>Challenger: Close WebSocket connection
|
||||
|
||||
Note over TPM,Client: Success - Node continues boot process
|
||||
```
|
||||
|
||||
### Flow Explanation
|
||||
|
||||
1. **Connection Establishment**: Client establishes WebSocket connection with partition metadata
|
||||
2. **TPM Authentication**: Cryptographic challenge-response proves client controls the TPM hardware
|
||||
3. **Enrollment Determination**: Server checks if this TPM is already enrolled
|
||||
4. **Security Verification**:
|
||||
- **TOFU**: New TPMs are automatically enrolled (Trust On First Use)
|
||||
- **Selective Enrollment**: Existing TPMs undergo flexible verification based on field states
|
||||
5. **Passphrase Delivery**: Encrypted disk passphrase is securely delivered to authenticated client
|
||||
|
||||
### Selective Enrollment States
|
||||
|
||||
| Field State | Verification | Updates | Use Case |
|
||||
|-------------|-------------|---------|----------|
|
||||
| **Empty** (`""`) | ✅ Accept any value | ✅ Update with current | Re-learn after TPM/firmware changes |
|
||||
| **Set** (`"abc123"`) | ✅ Enforce exact match | ❌ No updates | Strict security enforcement |
|
||||
| **Omitted** (deleted) | ❌ Skip entirely | ❌ Never re-enrolled | Ignore volatile PCRs (e.g., PCR 11) |
|
||||
|
||||
## Selective Enrollment Mode for TPM Attestation
|
||||
|
||||
The kcrypt-challenger implements a sophisticated "selective enrollment mode" that solves operational challenges in real-world TPM-based disk encryption deployments. This feature provides flexible attestation management while maintaining strong security guarantees.
|
||||
|
||||
### Key Features
|
||||
|
||||
- Full selective enrollment with three field states (empty, set, omitted)
|
||||
- Trust On First Use (TOFU) automatic enrollment
|
||||
- Secret reuse after SealedVolume recreation
|
||||
- PCR re-enrollment for kernel upgrades
|
||||
- PCR omission for volatile boot stages
|
||||
- Early quarantine checking with fail-fast behavior
|
||||
|
||||
### How Selective Enrollment Works
|
||||
|
||||
The system supports two distinct enrollment behaviors:
|
||||
|
||||
#### **Initial TOFU Enrollment** (No SealedVolume exists)
|
||||
- **Store ALL PCRs** provided by the client (don't omit any)
|
||||
- Create complete attestation baseline from first contact
|
||||
- Enables full security verification for subsequent attestations
|
||||
|
||||
#### **Selective Re-enrollment** (SealedVolume exists with specific fields)
|
||||
- **Empty values** (`""`) = Accept any value, update the stored value (re-enrollment mode)
|
||||
- **Set values** (`"abc123..."`) = Enforce exact match (enforcement mode)
|
||||
- **Omitted fields** = Skip verification entirely (ignored mode)
|
||||
|
||||
**Selective Enrollment Behavior Summary:**
|
||||
|
||||
| Field State | Verification | Updates | Use Case |
|
||||
|-------------|-------------|---------|----------|
|
||||
| **Empty** (`""`) | ✅ Accept any value | ✅ Update with current | Re-learn after TPM/firmware changes |
|
||||
| **Set** (`"abc123"`) | ✅ Enforce exact match | ❌ No updates | Strict security enforcement |
|
||||
| **Omitted** (deleted) | ❌ Skip entirely | ❌ Never re-enrolled | Ignore volatile PCRs (e.g., PCR 11) |
|
||||
|
||||
### SealedVolume API Examples
|
||||
|
||||
#### **Example 1: Initial TOFU Enrollment**
|
||||
When no SealedVolume exists, the server automatically creates one with ALL received PCRs:
|
||||
|
||||
```yaml
|
||||
# Server creates this automatically during TOFU enrollment
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
spec:
|
||||
TPMHash: "computed-from-client"
|
||||
attestation:
|
||||
ekPublicKey: "learned-ek" # Learned from client
|
||||
akPublicKey: "learned-ak" # Learned from client
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "abc123..." # All received PCRs stored
|
||||
"7": "def456..."
|
||||
"11": "ghi789..." # Including PCR 11 if provided
|
||||
```
|
||||
|
||||
#### **Example 2: Selective Re-enrollment Control**
|
||||
Operators can control which fields allow re-enrollment:
|
||||
|
||||
```yaml
|
||||
# Operator-controlled selective enforcement
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
spec:
|
||||
TPMHash: "required-tpm-hash" # MUST be set for client matching
|
||||
attestation:
|
||||
ekPublicKey: "" # Empty = re-enrollment mode
|
||||
akPublicKey: "fixed-ak" # Set = enforce this value
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "" # Empty = re-enrollment mode
|
||||
"7": "fixed-value" # Set = enforce this value
|
||||
# "11": omitted # Omitted = skip entirely
|
||||
```
|
||||
|
||||
### Use Cases Solved
|
||||
|
||||
1. **Pure TOFU**: No SealedVolume exists → System learns ALL attestation data from first contact
|
||||
2. **Static Passphrase Tests**: Create Secret + SealedVolume with TPM hash, let TOFU handle attestation data
|
||||
3. **Production Manual Setup**: Operators set known passphrases + TPM hashes, system learns remaining security data
|
||||
4. **Firmware Upgrades**: Set PCR 0 to empty to re-learn after BIOS updates
|
||||
5. **TPM Replacement**: Set AK/EK fields to empty to re-learn after hardware changes
|
||||
6. **Flexible Boot Stages**: Omit PCR 11 entirely so users can decrypt during boot AND after full system startup
|
||||
7. **Kernel Updates**: Omit PCR 11 to avoid quarantine on routine Kairos upgrades
|
||||
|
||||
### Practical Operator Workflows
|
||||
|
||||
#### **Scenario 1: Reusing Existing Passphrases After SealedVolume Recreation**
|
||||
|
||||
**Problem**: An operator needs to recreate a SealedVolume (e.g., after accidental deletion or configuration changes) but wants to keep using the existing passphrase to avoid re-encrypting the disk.
|
||||
|
||||
**Solution**: The system automatically reuses existing Kubernetes secrets when available:
|
||||
|
||||
```bash
|
||||
# 1. Operator accidentally deletes SealedVolume
|
||||
kubectl delete sealedvolume my-encrypted-volume
|
||||
|
||||
# 2. Original secret still exists in cluster
|
||||
kubectl get secret my-encrypted-volume-encrypted-data
|
||||
# NAME TYPE DATA AGE
|
||||
# my-encrypted-volume-encrypted-data Opaque 1 5d
|
||||
|
||||
# 3. When TPM client reconnects, system detects existing secret
|
||||
# and reuses the passphrase instead of generating a new one
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Detect the existing secret with the same name
|
||||
- Log: "Secret already exists, reusing existing secret"
|
||||
- Use the existing passphrase for decryption
|
||||
- Recreate the SealedVolume with current TPM attestation data
|
||||
- Maintain continuity without requiring disk re-encryption
|
||||
|
||||
#### **Scenario 2: Deliberately Skipping PCRs After Initial Enrollment**
|
||||
|
||||
**Problem**: An operator initially enrolls with PCRs 0, 7, and 11, but later realizes PCR 11 changes frequently due to kernel updates and wants to ignore it permanently.
|
||||
|
||||
**Solution**: Remove the PCR from the SealedVolume specification:
|
||||
|
||||
```bash
|
||||
# 1. Initial enrollment created SealedVolume with:
|
||||
# pcrValues:
|
||||
# pcrs:
|
||||
# "0": "abc123..."
|
||||
# "7": "def456..."
|
||||
# "11": "ghi789..."
|
||||
|
||||
# 2. Operator edits SealedVolume to remove PCR 11 entirely
|
||||
kubectl edit sealedvolume my-encrypted-volume
|
||||
# Remove the "11": "ghi789..." line completely
|
||||
|
||||
# 3. Result - omitted PCR 11:
|
||||
# pcrValues:
|
||||
# pcrs:
|
||||
# "0": "abc123..."
|
||||
# "7": "def456..."
|
||||
# # PCR 11 omitted = ignored entirely
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Skip PCR 11 verification entirely (no enforcement)
|
||||
- Never re-enroll PCR 11 in future attestations
|
||||
- Log: "PCR verification successful using selective enrollment" (without mentioning PCR 11)
|
||||
- Continue enforcing PCRs 0 and 7 normally
|
||||
|
||||
#### **Scenario 3: Manual PCR Selection During Initial Setup**
|
||||
|
||||
**Problem**: An operator knows certain PCRs will be unstable and wants to exclude them from the beginning.
|
||||
|
||||
**Solution**: Create the initial SealedVolume manually with only desired PCRs:
|
||||
|
||||
```yaml
|
||||
# Create SealedVolume with selective PCR enforcement from the start
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: selective-pcr-volume
|
||||
spec:
|
||||
TPMHash: "known-tpm-hash"
|
||||
partitions:
|
||||
- label: "encrypted-data"
|
||||
secret:
|
||||
name: "my-passphrase"
|
||||
path: "passphrase"
|
||||
attestation:
|
||||
ekPublicKey: "" # Re-enrollment mode
|
||||
akPublicKey: "" # Re-enrollment mode
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "" # Re-enrollment mode (will learn)
|
||||
"7": "" # Re-enrollment mode (will learn)
|
||||
# "11": omitted # Skip PCR 11 entirely
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Learn and enforce PCRs 0 and 7 on first attestation
|
||||
- Completely ignore PCR 11 (never verify, never store)
|
||||
- Allow flexible boot stages without PCR 11 interference
|
||||
|
||||
#### **Scenario 4: Kernel Upgrade - Temporary PCR Re-enrollment**
|
||||
|
||||
**Problem**: An operator is performing a kernel upgrade and knows PCR 11 will change, but wants to continue enforcing it after the upgrade (unlike permanent omission).
|
||||
|
||||
**Solution**: Set the PCR value to empty string to trigger re-enrollment mode:
|
||||
|
||||
```bash
|
||||
# 1. Before kernel upgrade - PCR 11 is currently enforced
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs.11}'
|
||||
# Output: "abc123def456..." (current PCR 11 value)
|
||||
|
||||
# 2. Set PCR 11 to empty string to allow re-enrollment
|
||||
kubectl patch sealedvolume my-volume --type='merge' \
|
||||
-p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"11":""}}}}}'
|
||||
|
||||
# 3. Perform kernel upgrade and reboot
|
||||
|
||||
# 4. After reboot, TPM client reconnects and system learns new PCR 11 value
|
||||
# Log will show: "Updated PCR value during selective enrollment, pcr: 11"
|
||||
|
||||
# 5. Verify new PCR 11 value is now enforced
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs.11}'
|
||||
# Output: "new789xyz012..." (new PCR 11 value after kernel upgrade)
|
||||
```
|
||||
|
||||
**Behavior**: The system will:
|
||||
- Accept any PCR 11 value on next attestation (re-enrollment mode)
|
||||
- Update the stored PCR 11 with the new post-upgrade value
|
||||
- Resume strict PCR 11 enforcement with the new value
|
||||
- Log: "Updated PCR value during selective enrollment"
|
||||
|
||||
**Key Difference from Scenario 2:**
|
||||
- **Scenario 2 (Omit PCR)**: PCR 11 permanently ignored, never verified again
|
||||
- **Scenario 4 (Empty PCR)**: PCR 11 temporarily re-enrolled, then enforced with new value
|
||||
|
||||
### Security Architecture
|
||||
|
||||
- **TPM Hash is mandatory** - prevents multiple clients from matching the same SealedVolume
|
||||
- **EK verification remains strict** - only AK and PCRs support selective enrollment modes
|
||||
- **Early quarantine checking** - quarantined TPMs are rejected immediately after authentication
|
||||
- **Comprehensive logging** - all enrollment events are logged for audit trails
|
||||
- **Challenge-response authentication** - prevents TPM impersonation attacks
|
||||
|
||||
### Quick Reference for Documentation
|
||||
|
||||
**Common Operations:**
|
||||
|
||||
```bash
|
||||
# Skip a PCR permanently (never verify again)
|
||||
kubectl edit sealedvolume my-volume
|
||||
# Remove the PCR line entirely from pcrValues.pcrs
|
||||
|
||||
# Temporarily allow PCR re-enrollment (e.g., before kernel upgrade)
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"11":""}}}}}'
|
||||
|
||||
# Re-learn a PCR after hardware change (e.g., PCR 0 after BIOS update)
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"pcrValues":{"pcrs":{"0":""}}}}}'
|
||||
|
||||
# Re-learn AK after TPM replacement
|
||||
kubectl patch sealedvolume my-volume --type='merge' -p='{"spec":{"attestation":{"akPublicKey":""}}}'
|
||||
|
||||
# Check current PCR enforcement status
|
||||
kubectl get sealedvolume my-volume -o jsonpath='{.spec.attestation.pcrValues.pcrs}' | jq .
|
||||
```
|
||||
|
||||
**Log Messages to Expect:**
|
||||
|
||||
- `"Secret already exists, reusing existing secret"` - Passphrase reuse scenario
|
||||
- `"Updated PCR value during selective enrollment"` - Re-enrollment mode active
|
||||
- `"PCR verification successful using selective enrollment"` - Omitted PCRs ignored
|
||||
- `"PCR enforcement mode verification passed"` - Strict enforcement active
|
||||
|
||||
## ✅ E2E Testing Coverage for Selective Enrollment
|
||||
|
||||
### Status: ✅ COMPLETED
|
||||
Comprehensive E2E test suite has been implemented covering all selective enrollment scenarios. The test suite is optimized for efficiency using VM reuse patterns to minimize execution time while maintaining thorough coverage.
|
||||
|
||||
### ✅ Implemented E2E Test Scenarios
|
||||
|
||||
#### **1. Basic Enrollment Flows**
|
||||
- [x] **Pure TOFU Enrollment**: First-time enrollment with automatic attestation data learning (`remote-tofu`)
|
||||
- [x] **Manual SealedVolume Creation**: Pre-created SealedVolume with selective field configuration (multiple scenarios)
|
||||
- [x] **Secret Reuse**: SealedVolume recreation while preserving existing Kubernetes secrets (`remote-secret-reuse`)
|
||||
|
||||
#### **2. Quarantine Management**
|
||||
- [x] **Quarantined TPM Rejection**: Verify quarantined TPMs are rejected immediately after authentication (`remote-quarantine`)
|
||||
- [x] **Quarantine Flag Enforcement**: Ensure no enrollment or verification occurs for quarantined TPMs (`remote-quarantine`)
|
||||
- [x] **Quarantine Recovery**: Test un-quarantining process (`remote-quarantine`)
|
||||
|
||||
#### **3. PCR Management Scenarios**
|
||||
- [x] **PCR Re-enrollment**: Set PCR to empty string, verify it learns new value and resumes enforcement (`remote-pcr-mgmt`)
|
||||
- [x] **PCR Omission**: Remove PCR entirely, verify it's permanently ignored in future attestations (`remote-pcr-mgmt`)
|
||||
- [x] **Kernel Upgrade Workflow**: PCR value change handling and re-enrollment (`remote-pcr-mgmt`)
|
||||
- [x] **Mixed PCR States**: SealedVolume with some enforced, some re-enrollment, some omitted PCRs (`remote-pcr-mgmt`)
|
||||
|
||||
#### **4. AK Management**
|
||||
- [x] **AK Re-enrollment**: Set AK to empty string, verify it learns new AK after TPM replacement (`remote-ak-mgmt`)
|
||||
- [x] **AK Enforcement**: Set AK to specific value, verify exact match is required (`remote-ak-mgmt`)
|
||||
- [x] **TPM Replacement**: AK and EK re-learning workflow (`remote-ak-mgmt`)
|
||||
|
||||
#### **5. Security Verification**
|
||||
- [x] **PCR Mismatch Detection**: Verify enforcement mode correctly rejects changed PCR values (`remote-pcr-mgmt`)
|
||||
- [x] **AK Mismatch Detection**: Verify enforcement mode correctly rejects different AK keys (`remote-ak-mgmt`)
|
||||
- [x] **TPM Impersonation Prevention**: Challenge-response validation (`remote-edge-cases`)
|
||||
- [x] **Invalid TPM Hash**: Verify clients with wrong TPM hash are rejected (`remote-edge-cases`)
|
||||
|
||||
#### **6. Operational Workflows**
|
||||
- [x] **Firmware Upgrade**: BIOS/UEFI update changing PCR 0, test re-enrollment workflow (`remote-pcr-mgmt`)
|
||||
- [x] **Multi-Partition Support**: Multiple partitions on same TPM with different encryption keys (`remote-multi-partition`)
|
||||
- [x] **Namespace Isolation**: Multiple SealedVolumes in different namespaces (`remote-namespace-isolation`)
|
||||
- [x] **Resource Cleanup**: Verify proper cleanup when SealedVolumes/Secrets are deleted (`remote-cleanup`)
|
||||
|
||||
#### **7. Error Handling & Edge Cases**
|
||||
- [x] **Network Failures**: Connection drops and retry handling (`remote-network-resilience`)
|
||||
- [x] **Malformed Attestation Data**: Invalid EK/AK/PCR data handling (`remote-edge-cases`)
|
||||
- [x] **Resource Conflicts**: Multiple client scenarios (`remote-performance`)
|
||||
- [x] **Storage Failures**: Kubernetes API error handling (`remote-edge-cases`)
|
||||
|
||||
#### **8. Performance & Scalability**
|
||||
- [x] **Concurrent Attestations**: Multiple TPMs requesting passphrases simultaneously (`remote-performance`)
|
||||
- [x] **Large PCR Sets**: Attestation with many PCRs (0-15) (`remote-large-pcr`)
|
||||
- [x] **Long-Running Stability**: Extended operation through multiple test cycles (`remote-performance`)
|
||||
|
||||
#### **9. Logging & Observability**
|
||||
- [x] **Audit Trail Verification**: Security events logging validation (integrated across all tests)
|
||||
- [x] **Log Message Accuracy**: Expected log messages verification (integrated across all tests)
|
||||
- [x] **Metrics Collection**: Performance monitoring during tests (integrated across all tests)
|
||||
|
||||
#### **10. Compatibility Testing**
|
||||
- [x] **TPM 2.0 Compatibility**: Software TPM emulation with TPM 2.0 (all tests use `swtpm`)
|
||||
- [x] **Kernel Variations**: PCR behavior testing across different scenarios (`remote-large-pcr`)
|
||||
- [x] **Hardware Variations**: TPM emulation covering different chip behaviors (via `swtpm`)
|
||||
|
||||
### Test Implementation Details
|
||||
|
||||
The comprehensive test suite includes:
|
||||
|
||||
- **18 Test Labels**: Covering all scenarios from basic to advanced
|
||||
- **3 Test Files**: Organized by complexity and VM reuse optimization
|
||||
- **VM Reuse Pattern**: Reduces test time from ~40 minutes to ~20 minutes
|
||||
- **Real TPM Emulation**: Uses `swtpm` for realistic TPM behavior
|
||||
- **GitHub Workflow Integration**: All tests run in CI/CD pipeline
|
||||
|
||||
See [`tests/README.md`](tests/README.md) for detailed test documentation and usage instructions.
|
||||
|
||||
### Test Environment Requirements
|
||||
|
||||
- **Real TPM Hardware**: Software TPM simulators may not catch hardware-specific issues
|
||||
- **Kernel Build Pipeline**: Ability to test actual kernel upgrades and PCR changes
|
||||
- **Multi-Node Clusters**: Test distributed scenarios and namespace isolation
|
||||
- **Network Partitioning**: Test resilience under network failures
|
||||
- **Performance Monitoring**: Metrics collection for scalability validation
|
||||
|
||||
### Success Criteria
|
||||
|
||||
All E2E tests must pass consistently across:
|
||||
- Different hardware configurations (various TPM chips)
|
||||
- Multiple kernel versions (to test PCR 11 variability)
|
||||
- Various cluster configurations (single-node, multi-node)
|
||||
- Different load conditions (single client, concurrent clients)
|
||||
|
||||
Completing this E2E test suite will provide confidence that the selective enrollment system works reliably in production environments.
|
||||
|
@@ -23,11 +23,39 @@ import (
|
||||
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
|
||||
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
|
||||
|
||||
// PCRValues represents Platform Configuration Register values for boot state verification
|
||||
// Uses a flexible map where keys are PCR indices (as strings) and values are hex-encoded PCR values
|
||||
type PCRValues struct {
|
||||
// PCRs is a flexible map of PCR index (as string) to PCR value (hex-encoded)
|
||||
// Example: {"0": "a1b2c3...", "7": "d4e5f6...", "11": "g7h8i9..."}
|
||||
// This allows for any combination of PCRs without hardcoding specific indices
|
||||
PCRs map[string]string `json:"pcrs,omitempty"`
|
||||
}
|
||||
|
||||
// AttestationSpec defines TPM attestation data for TOFU enrollment and verification
|
||||
type AttestationSpec struct {
|
||||
// EKPublicKey stores the Endorsement Key public key in PEM format
|
||||
EKPublicKey string `json:"ekPublicKey,omitempty"`
|
||||
|
||||
// AKPublicKey stores the Attestation Key public key in PEM format
|
||||
AKPublicKey string `json:"akPublicKey,omitempty"`
|
||||
|
||||
// PCRValues stores the expected PCR values for boot state verification
|
||||
PCRValues *PCRValues `json:"pcrValues,omitempty"`
|
||||
|
||||
// EnrolledAt timestamp when this TPM was first enrolled
|
||||
EnrolledAt *metav1.Time `json:"enrolledAt,omitempty"`
|
||||
|
||||
// LastVerifiedAt timestamp of the last successful attestation
|
||||
LastVerifiedAt *metav1.Time `json:"lastVerifiedAt,omitempty"`
|
||||
}
|
||||
|
||||
// SealedVolumeSpec defines the desired state of SealedVolume
|
||||
type SealedVolumeSpec struct {
|
||||
TPMHash string `json:"TPMHash,omitempty"`
|
||||
Partitions []PartitionSpec `json:"partitions,omitempty"`
|
||||
Quarantined bool `json:"quarantined,omitempty"`
|
||||
TPMHash string `json:"TPMHash,omitempty"`
|
||||
Partitions []PartitionSpec `json:"partitions,omitempty"`
|
||||
Quarantined bool `json:"quarantined,omitempty"`
|
||||
Attestation *AttestationSpec `json:"attestation,omitempty"`
|
||||
}
|
||||
|
||||
// PartitionSpec defines a Partition. A partition can be identified using
|
||||
|
@@ -25,6 +25,56 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AttestationSpec) DeepCopyInto(out *AttestationSpec) {
|
||||
*out = *in
|
||||
if in.PCRValues != nil {
|
||||
in, out := &in.PCRValues, &out.PCRValues
|
||||
*out = new(PCRValues)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.EnrolledAt != nil {
|
||||
in, out := &in.EnrolledAt, &out.EnrolledAt
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.LastVerifiedAt != nil {
|
||||
in, out := &in.LastVerifiedAt, &out.LastVerifiedAt
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttestationSpec.
|
||||
func (in *AttestationSpec) DeepCopy() *AttestationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AttestationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PCRValues) DeepCopyInto(out *PCRValues) {
|
||||
*out = *in
|
||||
if in.PCRs != nil {
|
||||
in, out := &in.PCRs, &out.PCRs
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PCRValues.
|
||||
func (in *PCRValues) DeepCopy() *PCRValues {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PCRValues)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PartitionSpec) DeepCopyInto(out *PartitionSpec) {
|
||||
*out = *in
|
||||
@@ -114,6 +164,11 @@ func (in *SealedVolumeSpec) DeepCopyInto(out *SealedVolumeSpec) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Attestation != nil {
|
||||
in, out := &in.Attestation, &out.Attestation
|
||||
*out = new(AttestationSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolumeSpec.
|
||||
|
374
cmd/discovery/cli_test.go
Normal file
374
cmd/discovery/cli_test.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestCLI(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Discovery CLI Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("CLI Interface", func() {
|
||||
BeforeEach(func() {
|
||||
// Clean up any previous log files
|
||||
_ = os.Remove("/tmp/kcrypt-challenger-client.log")
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Clean up log files
|
||||
_ = os.Remove("/tmp/kcrypt-challenger-client.log")
|
||||
})
|
||||
|
||||
Context("CLI help", func() {
|
||||
It("should show help when --help is used", func() {
|
||||
err := ExecuteWithArgs([]string{"--help"})
|
||||
|
||||
Expect(err).To(BeNil())
|
||||
// We can't easily test the output content without complex output capture,
|
||||
// but we can verify the function executes without error
|
||||
})
|
||||
})
|
||||
|
||||
Context("Input validation", func() {
|
||||
It("should require all partition parameters for get command", func() {
|
||||
err := ExecuteWithArgs([]string{"get"})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
// Should return an error when required parameters are missing
|
||||
})
|
||||
|
||||
It("should validate that all required fields are provided for get command", func() {
|
||||
// Test with valid partition parameters
|
||||
err := ExecuteWithArgs([]string{"get", "--partition-name=/dev/sda2"})
|
||||
Expect(err).To(HaveOccurred()) // Should fail at client connection but parsing should work
|
||||
|
||||
// Test with valid UUID
|
||||
err = ExecuteWithArgs([]string{"get", "--partition-uuid=12345"})
|
||||
Expect(err).To(HaveOccurred()) // Should fail at client connection but parsing should work
|
||||
})
|
||||
|
||||
It("should handle invalid flags gracefully", func() {
|
||||
err := ExecuteWithArgs([]string{"--invalid-flag"})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
// Should return an error for invalid flags
|
||||
})
|
||||
})
|
||||
|
||||
Context("Flow detection and backend integration", func() {
|
||||
It("should attempt to get passphrase with valid parameters", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid-12345",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// We expect this to fail since there's no server, but it should reach the backend logic
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Should show flow detection in the log (if created)
|
||||
logContent, readErr := os.ReadFile("/tmp/kcrypt-challenger-client.log")
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should contain flow detection message
|
||||
Expect(logStr).To(ContainSubstring("flow"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should use the correct backend client logic", func() {
|
||||
// Test that the CLI mode uses the same GetPassphrase method
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail but attempt to use the client
|
||||
Expect(err).To(HaveOccurred())
|
||||
// The important thing is that it reaches the backend and doesn't crash
|
||||
})
|
||||
})
|
||||
|
||||
Context("Configuration overrides with debug logging", func() {
|
||||
var tempDir string
|
||||
var originalLogFile string
|
||||
var testLogFile string
|
||||
var configDir string
|
||||
|
||||
BeforeEach(func() {
|
||||
// Create a temporary directory for this test
|
||||
var err error
|
||||
tempDir, err = os.MkdirTemp("", "kcrypt-test-*")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Use /tmp/oem since it's already in confScanDirs
|
||||
configDir = "/tmp/oem"
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create a test configuration file with known values
|
||||
configContent := `kcrypt:
|
||||
challenger:
|
||||
challenger_server: "https://default-server.com:8080"
|
||||
mdns: false
|
||||
certificate: "/default/path/to/cert.pem"
|
||||
nv_index: "0x1500000"
|
||||
c_index: "0x1400000"
|
||||
tpm_device: "/dev/tpm0"
|
||||
`
|
||||
configFile := filepath.Join(configDir, "kairos.yaml")
|
||||
err = os.WriteFile(configFile, []byte(configContent), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Override the log file location for testing
|
||||
originalLogFile = os.Getenv("KAIROS_LOG_FILE")
|
||||
testLogFile = filepath.Join(tempDir, "kcrypt-discovery-challenger.log")
|
||||
os.Setenv("KAIROS_LOG_FILE", testLogFile)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Restore original log file setting
|
||||
if originalLogFile != "" {
|
||||
os.Setenv("KAIROS_LOG_FILE", originalLogFile)
|
||||
} else {
|
||||
os.Unsetenv("KAIROS_LOG_FILE")
|
||||
}
|
||||
|
||||
// Clean up config file
|
||||
_ = os.RemoveAll(configDir)
|
||||
|
||||
// Clean up temporary directory
|
||||
_ = os.RemoveAll(tempDir)
|
||||
})
|
||||
|
||||
It("should read and use original configuration values without overrides", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but config parsing should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that original configuration values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration values from the file
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080"))
|
||||
Expect(logStr).To(ContainSubstring("false")) // mdns value
|
||||
Expect(logStr).To(ContainSubstring("/default/path/to/cert.pem"))
|
||||
// Should also show final configuration (which should be the same as original)
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
// Should NOT contain any override messages since no flags were provided
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should show configuration file values being overridden by CLI flags", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://overridden-server.com:9999",
|
||||
"--mdns=true",
|
||||
"--certificate=/overridden/cert.pem",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but config parsing and overrides should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that both original and overridden values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration values from the file
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080"))
|
||||
Expect(logStr).To(ContainSubstring("/default/path/to/cert.pem"))
|
||||
|
||||
// Should show override messages
|
||||
Expect(logStr).To(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).To(ContainSubstring("https://default-server.com:8080 -> https://overridden-server.com:9999"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).To(ContainSubstring("false -> true"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding certificate"))
|
||||
|
||||
// Should show final configuration with overridden values
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://overridden-server.com:9999"))
|
||||
Expect(logStr).To(ContainSubstring("/overridden/cert.pem"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should apply CLI flag overrides and log configuration changes", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://custom-server.com:8082",
|
||||
"--mdns=true",
|
||||
"--certificate=/path/to/cert.pem",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but flag parsing should work
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check if debug log exists and contains configuration information
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should contain debug information about configuration overrides
|
||||
Expect(logStr).To(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).To(ContainSubstring("https://custom-server.com:8082"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).To(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should show original vs final configuration in debug mode", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--challenger-server=https://override-server.com:9999",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail but debug information should be logged
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check for original and final configuration logging
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
Expect(logStr).To(ContainSubstring("https://override-server.com:9999"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should log partition details in debug mode", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/custom-partition",
|
||||
"--partition-uuid=custom-uuid-123",
|
||||
"--partition-label=custom-label-456",
|
||||
"--debug",
|
||||
"--attempts=2",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check for partition details in debug log
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
Expect(logStr).To(ContainSubstring("Partition details"))
|
||||
Expect(logStr).To(ContainSubstring("/dev/custom-partition"))
|
||||
Expect(logStr).To(ContainSubstring("custom-uuid-123"))
|
||||
Expect(logStr).To(ContainSubstring("custom-label-456"))
|
||||
Expect(logStr).To(ContainSubstring("Attempts: 2"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should not log debug information without debug flag", func() {
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Debug log should not exist or should not contain detailed debug info
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should not contain debug-level details
|
||||
Expect(logStr).NotTo(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Partition details"))
|
||||
}
|
||||
})
|
||||
|
||||
It("should handle missing configuration file gracefully and show defaults", func() {
|
||||
// Remove the config file to test default behavior
|
||||
_ = os.RemoveAll(configDir)
|
||||
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/test",
|
||||
"--partition-uuid=test-uuid",
|
||||
"--partition-label=test-label",
|
||||
"--debug",
|
||||
"--attempts=1",
|
||||
})
|
||||
|
||||
// Should fail at passphrase retrieval but not due to config parsing
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Check that default/empty configuration values are logged
|
||||
logContent, readErr := os.ReadFile(testLogFile)
|
||||
if readErr == nil {
|
||||
logStr := string(logContent)
|
||||
// Should show original configuration (which should be empty/defaults)
|
||||
Expect(logStr).To(ContainSubstring("Original configuration"))
|
||||
Expect(logStr).To(ContainSubstring("Final configuration"))
|
||||
// Should NOT contain override messages since no flags were provided
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding server URL"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding MDNS setting"))
|
||||
Expect(logStr).NotTo(ContainSubstring("Overriding certificate"))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Context("CLI argument parsing", func() {
|
||||
It("should parse all arguments correctly", func() {
|
||||
// This will fail at the client creation/server connection,
|
||||
// but should successfully parse all arguments
|
||||
err := ExecuteWithArgs([]string{
|
||||
"get",
|
||||
"--partition-name=/dev/custom",
|
||||
"--partition-uuid=custom-uuid-999",
|
||||
"--partition-label=custom-label",
|
||||
"--attempts=5",
|
||||
})
|
||||
|
||||
Expect(err).To(HaveOccurred()) // Fails due to no server
|
||||
// The important thing is that flag parsing worked and it reached the backend
|
||||
})
|
||||
|
||||
It("should handle boolean flags correctly", func() {
|
||||
// Test help flag
|
||||
err := ExecuteWithArgs([]string{"--help"})
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
@@ -1,37 +1,50 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-attestation/attest"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/payload"
|
||||
"github.com/kairos-io/kairos-sdk/kcrypt/bus"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/go-pluggable"
|
||||
"github.com/mudler/yip/pkg/utils"
|
||||
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
)
|
||||
|
||||
// Because of how go-pluggable works, we can't just print to stdout
|
||||
const LOGFILE = "/tmp/kcrypt-challenger-client.log"
|
||||
|
||||
// Retry delays for different failure types
|
||||
const (
|
||||
TPMRetryDelay = 100 * time.Millisecond // Brief delay for TPM hardware busy/unavailable
|
||||
NetworkRetryDelay = 1 * time.Second // Longer delay for network/server issues
|
||||
)
|
||||
|
||||
var errPartNotFound error = fmt.Errorf("pass for partition not found")
|
||||
var errBadCertificate error = fmt.Errorf("unknown certificate")
|
||||
|
||||
func NewClient() (*Client, error) {
|
||||
return NewClientWithLogger(types.NewKairosLogger("kcrypt-challenger-client", "error", false))
|
||||
}
|
||||
|
||||
func NewClientWithLogger(logger types.KairosLogger) (*Client, error) {
|
||||
conf, err := unmarshalConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Client{Config: conf}, nil
|
||||
return &Client{Config: conf, Logger: logger}, nil
|
||||
}
|
||||
|
||||
// ❯ echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
|
||||
func (c *Client) Start() error {
|
||||
if err := os.RemoveAll(LOGFILE); err != nil { // Start fresh
|
||||
return fmt.Errorf("removing the logfile: %w", err)
|
||||
@@ -51,7 +64,8 @@ func (c *Client) Start() error {
|
||||
}
|
||||
}
|
||||
|
||||
pass, err := c.waitPass(b, 30)
|
||||
// Use the extracted core logic
|
||||
pass, err := c.GetPassphrase(b, 30)
|
||||
if err != nil {
|
||||
return pluggable.EventResponse{
|
||||
Error: fmt.Sprintf("failed getting pass: %s", err.Error()),
|
||||
@@ -66,36 +80,9 @@ func (c *Client) Start() error {
|
||||
return factory.Run(pluggable.EventType(os.Args[1]), os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
func (c *Client) generatePass(postEndpoint string, headers map[string]string, p *block.Partition) error {
|
||||
|
||||
rand := utils.RandomString(32)
|
||||
pass, err := tpm.EncryptBlob([]byte(rand))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bpass := base64.RawURLEncoding.EncodeToString(pass)
|
||||
|
||||
opts := []tpm.Option{
|
||||
tpm.WithCAs([]byte(c.Config.Kcrypt.Challenger.Certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
tpm.WithAdditionalHeader("label", p.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", p.Name),
|
||||
tpm.WithAdditionalHeader("uuid", p.UUID),
|
||||
}
|
||||
for k, v := range headers {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
|
||||
conn, err := tpm.Connection(postEndpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return conn.WriteJSON(payload.Data{Passphrase: bpass, GeneratedBy: constants.TPMSecret})
|
||||
}
|
||||
|
||||
func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err error) {
|
||||
additionalHeaders := map[string]string{}
|
||||
// ❯ echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
|
||||
// GetPassphrase retrieves a passphrase for the given partition - core business logic
|
||||
func (c *Client) GetPassphrase(partition *block.Partition, attempts int) (string, error) {
|
||||
serverURL := c.Config.Kcrypt.Challenger.Server
|
||||
|
||||
// If we don't have any server configured, just do local
|
||||
@@ -103,44 +90,197 @@ func (c *Client) waitPass(p *block.Partition, attempts int) (pass string, err er
|
||||
return localPass(c.Config)
|
||||
}
|
||||
|
||||
additionalHeaders := map[string]string{}
|
||||
var err error
|
||||
if c.Config.Kcrypt.Challenger.MDNS {
|
||||
serverURL, additionalHeaders, err = queryMDNS(serverURL)
|
||||
serverURL, additionalHeaders, err = queryMDNS(serverURL, c.Logger)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
getEndpoint := fmt.Sprintf("%s/getPass", serverURL)
|
||||
postEndpoint := fmt.Sprintf("%s/postPass", serverURL)
|
||||
c.Logger.Debugf("Starting TPM attestation flow with server: %s", serverURL)
|
||||
return c.waitPassWithTPMAttestation(serverURL, additionalHeaders, partition, attempts)
|
||||
}
|
||||
|
||||
// waitPassWithTPMAttestation implements the new TPM remote attestation flow over WebSocket
|
||||
func (c *Client) waitPassWithTPMAttestation(serverURL string, additionalHeaders map[string]string, p *block.Partition, attempts int) (string, error) {
|
||||
attestationEndpoint := fmt.Sprintf("%s/tpm-attestation", serverURL)
|
||||
c.Logger.Debugf("Debug: TPM attestation endpoint: %s", attestationEndpoint)
|
||||
|
||||
for tries := 0; tries < attempts; tries++ {
|
||||
var generated bool
|
||||
pass, generated, err = getPass(getEndpoint, additionalHeaders, c.Config.Kcrypt.Challenger.Certificate, p)
|
||||
if err == errPartNotFound {
|
||||
// IF server doesn't have a pass for us, then we generate one and we set it
|
||||
err = c.generatePass(postEndpoint, additionalHeaders, p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Attempt to fetch again - validate that the server has it now
|
||||
tries = 0
|
||||
c.Logger.Debugf("Debug: TPM attestation attempt %d/%d", tries+1, attempts)
|
||||
|
||||
// Step 1: Initialize AK Manager
|
||||
c.Logger.Debugf("Debug: Initializing AK Manager with handle file: %s", constants.AKBlobFile)
|
||||
akManager, err := tpm.NewAKManager(tpm.WithAKHandleFile(constants.AKBlobFile))
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed to create AK manager: %v", err)
|
||||
time.Sleep(TPMRetryDelay)
|
||||
continue
|
||||
}
|
||||
c.Logger.Debugf("Debug: AK Manager initialized successfully")
|
||||
|
||||
// Step 2: Ensure AK exists
|
||||
c.Logger.Debugf("Debug: Getting or creating AK")
|
||||
_, err = akManager.GetOrCreateAK()
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed to get/create AK: %v", err)
|
||||
time.Sleep(TPMRetryDelay)
|
||||
continue
|
||||
}
|
||||
c.Logger.Debugf("Debug: AK obtained/created successfully")
|
||||
|
||||
// Step 3: Start WebSocket-based attestation flow
|
||||
c.Logger.Debugf("Debug: Starting WebSocket-based attestation flow")
|
||||
passphrase, err := c.performTPMAttestation(attestationEndpoint, additionalHeaders, akManager, p)
|
||||
if err != nil {
|
||||
c.Logger.Debugf("Failed TPM attestation: %v", err)
|
||||
time.Sleep(NetworkRetryDelay)
|
||||
continue
|
||||
}
|
||||
|
||||
if generated { // passphrase is encrypted
|
||||
return c.decryptPassphrase(pass)
|
||||
}
|
||||
|
||||
if err == errBadCertificate { // No need to retry, won't succeed.
|
||||
return
|
||||
}
|
||||
|
||||
if err == nil { // passphrase available, no errors
|
||||
return
|
||||
}
|
||||
|
||||
logToFile("Failed with error: %s . Will retry.\n", err.Error())
|
||||
time.Sleep(1 * time.Second) // network errors? retry
|
||||
return passphrase, nil
|
||||
}
|
||||
|
||||
return
|
||||
return "", fmt.Errorf("exhausted all attempts (%d) for TPM attestation", attempts)
|
||||
}
|
||||
|
||||
// performTPMAttestation handles the complete attestation flow over a single WebSocket connection
|
||||
func (c *Client) performTPMAttestation(endpoint string, additionalHeaders map[string]string, akManager *tpm.AKManager, p *block.Partition) (string, error) {
|
||||
c.Logger.Debugf("Debug: Creating WebSocket connection to endpoint: %s", endpoint)
|
||||
c.Logger.Debugf("Debug: Partition details - Label: %s, Name: %s, UUID: %s", p.FilesystemLabel, p.Name, p.UUID)
|
||||
c.Logger.Debugf("Debug: Certificate length: %d", len(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
// Create WebSocket connection
|
||||
opts := []tpm.Option{
|
||||
tpm.WithAdditionalHeader("label", p.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", p.Name),
|
||||
tpm.WithAdditionalHeader("uuid", p.UUID),
|
||||
}
|
||||
|
||||
// Only add certificate options if a certificate is provided
|
||||
if len(c.Config.Kcrypt.Challenger.Certificate) > 0 {
|
||||
c.Logger.Debugf("Debug: Adding certificate validation options")
|
||||
opts = append(opts,
|
||||
tpm.WithCAs([]byte(c.Config.Kcrypt.Challenger.Certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
)
|
||||
} else {
|
||||
c.Logger.Debugf("Debug: No certificate provided, using insecure connection")
|
||||
}
|
||||
for k, v := range additionalHeaders {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
c.Logger.Debugf("Debug: WebSocket options configured, attempting connection...")
|
||||
|
||||
// Add connection timeout to prevent hanging indefinitely
|
||||
type connectionResult struct {
|
||||
conn interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
done := make(chan connectionResult, 1)
|
||||
|
||||
go func() {
|
||||
c.Logger.Debugf("Debug: Using tpm.AttestationConnection for new TPM flow")
|
||||
conn, err := tpm.AttestationConnection(endpoint, opts...)
|
||||
c.Logger.Debugf("Debug: tpm.AttestationConnection returned with err: %v", err)
|
||||
done <- connectionResult{conn: conn, err: err}
|
||||
}()
|
||||
|
||||
var conn *websocket.Conn
|
||||
select {
|
||||
case result := <-done:
|
||||
if result.err != nil {
|
||||
c.Logger.Debugf("Debug: WebSocket connection failed: %v", result.err)
|
||||
return "", fmt.Errorf("creating WebSocket connection: %w", result.err)
|
||||
}
|
||||
var ok bool
|
||||
conn, ok = result.conn.(*websocket.Conn)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected connection type")
|
||||
}
|
||||
c.Logger.Debugf("Debug: WebSocket connection established successfully")
|
||||
case <-time.After(10 * time.Second):
|
||||
c.Logger.Debugf("Debug: WebSocket connection timed out after 10 seconds")
|
||||
return "", fmt.Errorf("WebSocket connection timed out")
|
||||
}
|
||||
|
||||
defer conn.Close() //nolint:errcheck
|
||||
|
||||
// Protocol Step 1: Send attestation data (EK + AK) to server so it can generate proper challenge
|
||||
c.Logger.Debugf("Debug: Getting attestation data for challenge generation")
|
||||
ek, akParams, err := akManager.GetAttestationData()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting attestation data: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Got EK and AK attestation data")
|
||||
|
||||
// Serialize EK to bytes using the existing encoding from tmp-helpers
|
||||
ekPEM, err := encodeEKToBytes(ek)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("encoding EK to bytes: %w", err)
|
||||
}
|
||||
|
||||
// Serialize AK parameters to JSON bytes
|
||||
akBytes, err := json.Marshal(akParams)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling AK parameters: %w", err)
|
||||
}
|
||||
|
||||
// Send attestation data to server as bytes
|
||||
attestationData := struct {
|
||||
EKBytes []byte `json:"ek_bytes"`
|
||||
AKBytes []byte `json:"ak_bytes"`
|
||||
}{
|
||||
EKBytes: ekPEM,
|
||||
AKBytes: akBytes,
|
||||
}
|
||||
|
||||
c.Logger.Debugf("Debug: Sending attestation data to server")
|
||||
if err := conn.WriteJSON(attestationData); err != nil {
|
||||
return "", fmt.Errorf("sending attestation data: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Attestation data sent successfully")
|
||||
|
||||
// Protocol Step 2: Wait for challenge response from server
|
||||
c.Logger.Debugf("Debug: Waiting for challenge from server")
|
||||
var challengeResp tpm.AttestationChallengeResponse
|
||||
if err := conn.ReadJSON(&challengeResp); err != nil {
|
||||
return "", fmt.Errorf("reading challenge from server: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Challenge received")
|
||||
|
||||
// Protocol Step 3: Create proof request using AK Manager
|
||||
c.Logger.Debugf("Debug: Creating proof request from challenge response")
|
||||
proofReq, err := akManager.CreateProofRequest(&challengeResp)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating proof request: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Debug: Proof request created successfully")
|
||||
|
||||
// Protocol Step 4: Send proof to server
|
||||
c.Logger.Debugf("Debug: Sending proof request to server")
|
||||
if err := conn.WriteJSON(proofReq); err != nil {
|
||||
return "", fmt.Errorf("sending proof request: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Proof request sent")
|
||||
|
||||
// Protocol Step 5: Receive passphrase from server
|
||||
c.Logger.Debugf("Debug: Waiting for passphrase response")
|
||||
var proofResp tpm.ProofResponse
|
||||
if err := conn.ReadJSON(&proofResp); err != nil {
|
||||
return "", fmt.Errorf("reading passphrase response: %w", err)
|
||||
}
|
||||
c.Logger.Debugf("Passphrase received - Length: %d bytes", len(proofResp.Passphrase))
|
||||
|
||||
// Check if we received an empty passphrase (indicates server error)
|
||||
if len(proofResp.Passphrase) == 0 {
|
||||
return "", fmt.Errorf("server returned empty passphrase, indicating an error occurred during attestation")
|
||||
}
|
||||
|
||||
return string(proofResp.Passphrase), nil
|
||||
}
|
||||
|
||||
// decryptPassphrase decodes (base64) and decrypts the passphrase returned
|
||||
@@ -164,13 +304,25 @@ func (c *Client) decryptPassphrase(pass string) (string, error) {
|
||||
return string(passBytes), err
|
||||
}
|
||||
|
||||
func logToFile(format string, a ...any) {
|
||||
s := fmt.Sprintf(format, a...)
|
||||
file, err := os.OpenFile(LOGFILE, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
// encodeEKToBytes encodes an EK to PEM bytes for transmission
|
||||
func encodeEKToBytes(ek *attest.EK) ([]byte, error) {
|
||||
if ek.Certificate != nil {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: ek.Certificate.Raw,
|
||||
}
|
||||
return pem.EncodeToMemory(pemBlock), nil
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString(s)
|
||||
// For EKs without certificates, marshal the public key
|
||||
pubBytes, err := x509.MarshalPKIXPublicKey(ek.Public)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling EK public key: %w", err)
|
||||
}
|
||||
|
||||
pemBlock := &pem.Block{
|
||||
Type: "PUBLIC KEY",
|
||||
Bytes: pubBytes,
|
||||
}
|
||||
return pem.EncodeToMemory(pemBlock), nil
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package client
|
||||
|
||||
import (
|
||||
"github.com/kairos-io/kairos-sdk/collector"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
@@ -14,6 +15,7 @@ var confScanDirs = []string{"/oem", "/sysroot/oem", "/tmp/oem"}
|
||||
|
||||
type Client struct {
|
||||
Config Config
|
||||
Logger types.KairosLogger
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
|
@@ -1,58 +1,12 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/payload"
|
||||
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/yip/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const DefaultNVIndex = "0x1500000"
|
||||
|
||||
func getPass(server string, headers map[string]string, certificate string, partition *block.Partition) (string, bool, error) {
|
||||
opts := []tpm.Option{
|
||||
tpm.WithCAs([]byte(certificate)),
|
||||
tpm.AppendCustomCAToSystemCA,
|
||||
tpm.WithAdditionalHeader("label", partition.FilesystemLabel),
|
||||
tpm.WithAdditionalHeader("name", partition.Name),
|
||||
tpm.WithAdditionalHeader("uuid", partition.UUID),
|
||||
}
|
||||
for k, v := range headers {
|
||||
opts = append(opts, tpm.WithAdditionalHeader(k, v))
|
||||
}
|
||||
|
||||
msg, err := tpm.Get(server, opts...)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
result := payload.Data{}
|
||||
err = json.Unmarshal(msg, &result)
|
||||
if err != nil {
|
||||
return "", false, errors.Wrap(err, string(msg))
|
||||
}
|
||||
|
||||
if result.HasPassphrase() {
|
||||
return fmt.Sprint(result.Passphrase), result.HasBeenGenerated() && result.GeneratedBy == constants.TPMSecret, nil
|
||||
} else if result.HasError() {
|
||||
if strings.Contains(result.Error, "No secret found for") {
|
||||
return "", false, errPartNotFound
|
||||
}
|
||||
if strings.Contains(result.Error, "x509: certificate signed by unknown authority") {
|
||||
return "", false, errBadCertificate
|
||||
}
|
||||
return "", false, errors.New(result.Error)
|
||||
}
|
||||
|
||||
return "", false, errPartNotFound
|
||||
}
|
||||
|
||||
func genAndStore(k Config) (string, error) {
|
||||
opts := []tpm.TPMOption{}
|
||||
if k.Kcrypt.Challenger.TPMDevice != "" {
|
||||
|
47
cmd/discovery/client/flow_test.go
Normal file
47
cmd/discovery/client/flow_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Discovery Client Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("Flow Detection", func() {
|
||||
var client *Client
|
||||
|
||||
BeforeEach(func() {
|
||||
// Create a test client with basic config and logger
|
||||
client = &Client{}
|
||||
client.Config.Kcrypt.Challenger.Server = "http://test-server.local"
|
||||
client.Logger = types.NewKairosLogger("test-client", "debug", false)
|
||||
})
|
||||
|
||||
Context("TPM attestation capabilities", func() {
|
||||
It("should handle TPM operations", func() {
|
||||
// Test that client can be created without errors
|
||||
// TPM availability testing requires actual hardware
|
||||
Expect(client).ToNot(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Logging functionality", func() {
|
||||
It("should have a valid logger", func() {
|
||||
// Test that client has a valid logger
|
||||
Expect(client.Logger).NotTo(BeNil())
|
||||
|
||||
// Test debug logging works without error
|
||||
client.Logger.Debugf("Test log entry for flow detection")
|
||||
|
||||
// If we get here without panic, logging is working
|
||||
Expect(true).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
})
|
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/mdns"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -18,7 +19,7 @@ const (
|
||||
// queryMDNS will make an mdns query on local network to find a kcrypt challenger server
|
||||
// instance. If none is found, the original URL is returned and no additional headers.
|
||||
// If a response is received, the IP address and port from the response will be returned// and an additional "Host" header pointing to the original host.
|
||||
func queryMDNS(originalURL string) (string, map[string]string, error) {
|
||||
func queryMDNS(originalURL string, logger types.KairosLogger) (string, map[string]string, error) {
|
||||
additionalHeaders := map[string]string{}
|
||||
var err error
|
||||
|
||||
@@ -32,9 +33,9 @@ func queryMDNS(originalURL string) (string, map[string]string, error) {
|
||||
return "", additionalHeaders, fmt.Errorf("domain should end in \".local\" when using mdns")
|
||||
}
|
||||
|
||||
mdnsIP, mdnsPort := discoverMDNSServer(host)
|
||||
mdnsIP, mdnsPort := discoverMDNSServer(host, logger)
|
||||
if mdnsIP == "" { // no reply
|
||||
logToFile("no reply from mdns\n")
|
||||
logger.Debugf("no reply from mdns")
|
||||
return originalURL, additionalHeaders, nil
|
||||
}
|
||||
|
||||
@@ -56,12 +57,12 @@ func queryMDNS(originalURL string) (string, map[string]string, error) {
|
||||
// discoverMDNSServer performs an mDNS query to discover any running kcrypt challenger
|
||||
// servers on the same network that matches the given hostname.
|
||||
// If a response if received, the IP address and the Port from the response are returned.
|
||||
func discoverMDNSServer(hostname string) (string, string) {
|
||||
func discoverMDNSServer(hostname string, logger types.KairosLogger) (string, string) {
|
||||
// Make a channel for results and start listening
|
||||
entriesCh := make(chan *mdns.ServiceEntry, 4)
|
||||
defer close(entriesCh)
|
||||
|
||||
logToFile("Will now wait for some mdns server to respond\n")
|
||||
logger.Debugf("Will now wait for some mdns server to respond")
|
||||
// Start the lookup. It will block until we read from the chan.
|
||||
mdns.Lookup(MDNSServiceType, entriesCh)
|
||||
|
||||
@@ -70,15 +71,15 @@ func discoverMDNSServer(hostname string) (string, string) {
|
||||
for {
|
||||
select {
|
||||
case entry := <-entriesCh:
|
||||
logToFile("mdns response received\n")
|
||||
logger.Debugf("mdns response received")
|
||||
if entry.Host == expectedHost {
|
||||
logToFile("%s matches %s\n", entry.Host, expectedHost)
|
||||
logger.Debugf("%s matches %s", entry.Host, expectedHost)
|
||||
return entry.AddrV4.String(), strconv.Itoa(entry.Port) // TODO: v6?
|
||||
} else {
|
||||
logToFile("%s didn't match %s\n", entry.Host, expectedHost)
|
||||
logger.Debugf("%s didn't match %s", entry.Host, expectedHost)
|
||||
}
|
||||
case <-time.After(MDNSTimeout):
|
||||
logToFile("timed out waiting for mdns\n")
|
||||
logger.Debugf("timed out waiting for mdns")
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
|
@@ -1,53 +1,478 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/jaypipes/ghw/pkg/block"
|
||||
"github.com/kairos-io/kairos-challenger/cmd/discovery/client"
|
||||
"github.com/kairos-io/kairos-challenger/pkg/constants"
|
||||
"github.com/kairos-io/kairos-sdk/kcrypt/bus"
|
||||
"github.com/kairos-io/kairos-sdk/types"
|
||||
"github.com/kairos-io/tpm-helpers"
|
||||
"github.com/mudler/go-pluggable"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) >= 2 && isEventDefined(os.Args[1]) {
|
||||
c, err := client.NewClient()
|
||||
checkErr(err)
|
||||
checkErr(c.Start())
|
||||
return
|
||||
}
|
||||
|
||||
pubhash, err := tpm.GetPubHash()
|
||||
checkErr(err)
|
||||
fmt.Print(pubhash)
|
||||
// GetFlags holds all flags specific to the get command
|
||||
type GetFlags struct {
|
||||
PartitionName string
|
||||
PartitionUUID string
|
||||
PartitionLabel string
|
||||
Attempts int
|
||||
ChallengerServer string
|
||||
EnableMDNS bool
|
||||
ServerCertificate string
|
||||
}
|
||||
|
||||
func checkErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
var (
|
||||
// Global/persistent flags
|
||||
debug bool
|
||||
)
|
||||
|
||||
// rootCmd represents the base command (TPM hash generation)
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "kcrypt-discovery-challenger",
|
||||
Short: "kcrypt-challenger discovery client",
|
||||
Long: `kcrypt-challenger discovery client
|
||||
|
||||
This tool provides TPM-based operations for encrypted partition management.
|
||||
By default, it outputs the TPM hash for this device.
|
||||
|
||||
Configuration:
|
||||
The client reads configuration from Kairos configuration files in the following directories:
|
||||
- /oem (during installation from ISO)
|
||||
- /sysroot/oem (on installed systems during initramfs)
|
||||
- /tmp/oem (when running in hooks)
|
||||
|
||||
Configuration format (YAML):
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "https://my-server.com:8082" # Server URL
|
||||
mdns: true # Enable mDNS discovery
|
||||
certificate: "/path/to/server-cert.pem" # Server certificate
|
||||
nv_index: "0x1500000" # TPM NV index (offline mode)
|
||||
c_index: "0x1500001" # TPM certificate index
|
||||
tpm_device: "/dev/tpmrm0" # TPM device path`,
|
||||
Example: ` # Get TPM hash for this device (default)
|
||||
kcrypt-discovery-challenger
|
||||
|
||||
# Get passphrase for encrypted partition
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2
|
||||
|
||||
# Clean up TPM NV memory (useful for development)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Run plugin event
|
||||
kcrypt-discovery-challenger discovery.password`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runTPMHash()
|
||||
},
|
||||
}
|
||||
|
||||
// newCleanupCmd creates the cleanup command
|
||||
func newCleanupCmd() *cobra.Command {
|
||||
var nvIndex string
|
||||
var tpmDevice string
|
||||
var skipConfirmation bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "cleanup",
|
||||
Short: "Clean up TPM NV memory",
|
||||
Long: `Clean up TPM NV memory by undefining specific NV indices.
|
||||
|
||||
⚠️ DANGER: This command removes encryption passphrases from TPM memory!
|
||||
⚠️ If you delete the wrong index, your encrypted disk may become UNBOOTABLE!
|
||||
|
||||
This command helps clean up TPM NV memory used by the local pass flow,
|
||||
which stores encrypted passphrases in TPM non-volatile memory. Without
|
||||
cleanup, these passphrases persist indefinitely and take up space.
|
||||
|
||||
The command will prompt for confirmation before deletion unless you use
|
||||
the --i-know-what-i-am-doing flag to skip the safety prompt.
|
||||
|
||||
Default behavior:
|
||||
- Uses the same NV index as the local pass flow (from config or 0x1500000)
|
||||
- Uses the same TPM device as configured (or system default if none specified)
|
||||
- Prompts for confirmation with safety warnings`,
|
||||
Example: ` # Clean up default NV index (with confirmation prompt)
|
||||
kcrypt-discovery-challenger cleanup
|
||||
|
||||
# Clean up specific NV index
|
||||
kcrypt-discovery-challenger cleanup --nv-index=0x1500001
|
||||
|
||||
# Clean up with specific TPM device
|
||||
kcrypt-discovery-challenger cleanup --tpm-device=/dev/tpmrm0
|
||||
|
||||
# Skip confirmation prompt (DANGEROUS!)
|
||||
kcrypt-discovery-challenger cleanup --i-know-what-i-am-doing`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runCleanup(nvIndex, tpmDevice, skipConfirmation)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&nvIndex, "nv-index", "", fmt.Sprintf("NV index to clean up (defaults to configured index or %s)", client.DefaultNVIndex))
|
||||
cmd.Flags().StringVar(&tpmDevice, "tpm-device", "", "TPM device path (defaults to configured device or system default)")
|
||||
cmd.Flags().BoolVar(&skipConfirmation, "i-know-what-i-am-doing", false, "Skip confirmation prompt (DANGEROUS: may make encrypted disks unbootable)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// newGetCmd creates the get command with its flags
|
||||
func newGetCmd() *cobra.Command {
|
||||
flags := &GetFlags{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get passphrase for encrypted partition",
|
||||
Long: `Get passphrase for encrypted partition using TPM attestation.
|
||||
|
||||
This command retrieves passphrases for encrypted partitions by communicating
|
||||
with a challenger server using TPM-based attestation. At least one partition
|
||||
identifier (name, UUID, or label) must be provided.
|
||||
|
||||
The command uses configuration from the root command's config files, but flags
|
||||
can override specific settings:
|
||||
--challenger-server Override kcrypt.challenger.challenger_server
|
||||
--mdns Override kcrypt.challenger.mdns
|
||||
--certificate Override kcrypt.challenger.certificate`,
|
||||
Example: ` # Get passphrase using partition name
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2
|
||||
|
||||
# Get passphrase using UUID
|
||||
kcrypt-discovery-challenger get --partition-uuid=12345-abcde
|
||||
|
||||
# Get passphrase using filesystem label
|
||||
kcrypt-discovery-challenger get --partition-label=encrypted-data
|
||||
|
||||
# Get passphrase with multiple identifiers
|
||||
kcrypt-discovery-challenger get --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data
|
||||
|
||||
# Get passphrase with custom server
|
||||
kcrypt-discovery-challenger get --partition-label=encrypted-data --challenger-server=https://my-server.com:8082`,
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Validate that at least one partition identifier is provided
|
||||
if flags.PartitionName == "" && flags.PartitionUUID == "" && flags.PartitionLabel == "" {
|
||||
return fmt.Errorf("at least one of --partition-name, --partition-uuid, or --partition-label must be provided")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runGetPassphrase(flags)
|
||||
},
|
||||
}
|
||||
|
||||
// Register flags
|
||||
cmd.Flags().StringVar(&flags.PartitionName, "partition-name", "", "Name of the partition (at least one identifier required)")
|
||||
cmd.Flags().StringVar(&flags.PartitionUUID, "partition-uuid", "", "UUID of the partition (at least one identifier required)")
|
||||
cmd.Flags().StringVar(&flags.PartitionLabel, "partition-label", "", "Filesystem label of the partition (at least one identifier required)")
|
||||
cmd.Flags().IntVar(&flags.Attempts, "attempts", 30, "Number of attempts to get the passphrase")
|
||||
cmd.Flags().StringVar(&flags.ChallengerServer, "challenger-server", "", "URL of the challenger server (overrides config)")
|
||||
cmd.Flags().BoolVar(&flags.EnableMDNS, "mdns", false, "Enable mDNS discovery (overrides config)")
|
||||
cmd.Flags().StringVar(&flags.ServerCertificate, "certificate", "", "Server certificate for verification (overrides config)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// pluginCmd represents the plugin event commands
|
||||
var pluginCmd = &cobra.Command{
|
||||
Use: string(bus.EventDiscoveryPassword),
|
||||
Short: fmt.Sprintf("Run %s plugin event", bus.EventDiscoveryPassword),
|
||||
Long: fmt.Sprintf(`Run the %s plugin event.
|
||||
|
||||
This command runs in plugin mode, reading JSON partition data from stdin
|
||||
and outputting the passphrase to stdout. This is used for integration
|
||||
with kcrypt and other tools.`, bus.EventDiscoveryPassword),
|
||||
Example: fmt.Sprintf(` # Plugin mode (for integration with kcrypt)
|
||||
echo '{"data": "{\"name\": \"/dev/sda2\", \"uuid\": \"12345-abcde\", \"label\": \"encrypted-data\"}"}' | kcrypt-discovery-challenger %s`, bus.EventDiscoveryPassword),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runPluginMode()
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Global/persistent flags (available to all commands)
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
|
||||
// Add subcommands
|
||||
rootCmd.AddCommand(newGetCmd())
|
||||
rootCmd.AddCommand(newCleanupCmd())
|
||||
rootCmd.AddCommand(pluginCmd)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// isEventDefined checks whether an event is defined in the bus.
|
||||
// It accepts strings or EventType, returns a boolean indicating that
|
||||
// the event was defined among the events emitted by the bus.
|
||||
func isEventDefined(i interface{}) bool {
|
||||
checkEvent := func(e pluggable.EventType) bool {
|
||||
if e == bus.EventDiscoveryPassword {
|
||||
return true
|
||||
}
|
||||
// ExecuteWithArgs executes the root command with the given arguments.
|
||||
// This function is used by tests to simulate CLI execution.
|
||||
func ExecuteWithArgs(args []string) error {
|
||||
// Set command arguments (this overrides os.Args)
|
||||
rootCmd.SetArgs(args)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
switch f := i.(type) {
|
||||
case string:
|
||||
return checkEvent(pluggable.EventType(f))
|
||||
case pluggable.EventType:
|
||||
return checkEvent(f)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return rootCmd.Execute()
|
||||
}
|
||||
|
||||
// runTPMHash handles the root command - TPM hash generation
|
||||
func runTPMHash() error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
logger.Debugf("Debug mode enabled for TPM hash generation")
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Initialize AK Manager with the standard handle file
|
||||
logger.Debugf("Initializing AK Manager with handle file: %s", constants.AKBlobFile)
|
||||
akManager, err := tpm.NewAKManager(tpm.WithAKHandleFile(constants.AKBlobFile))
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating AK manager: %w", err)
|
||||
}
|
||||
logger.Debugf("AK Manager initialized successfully")
|
||||
|
||||
// Ensure AK exists (create if necessary)
|
||||
logger.Debugf("Getting or creating AK")
|
||||
_, err = akManager.GetOrCreateAK()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting/creating AK: %w", err)
|
||||
}
|
||||
logger.Debugf("AK obtained/created successfully")
|
||||
|
||||
// Get attestation data (includes EK)
|
||||
logger.Debugf("Getting attestation data")
|
||||
ek, _, err := akManager.GetAttestationData()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting attestation data: %w", err)
|
||||
}
|
||||
logger.Debugf("Attestation data retrieved successfully")
|
||||
|
||||
// Compute TPM hash from EK
|
||||
logger.Debugf("Computing TPM hash from EK")
|
||||
tpmHash, err := tpm.DecodePubHash(ek)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing TPM hash: %w", err)
|
||||
}
|
||||
logger.Debugf("TPM hash computed successfully: %s", tpmHash)
|
||||
|
||||
// Output the TPM hash to stdout
|
||||
fmt.Print(tpmHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// runGetPassphrase handles the get subcommand - passphrase retrieval
|
||||
func runGetPassphrase(flags *GetFlags) error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Create client with potential CLI overrides
|
||||
c, err := createClientWithOverrides(flags.ChallengerServer, flags.EnableMDNS, flags.ServerCertificate, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
// Create partition object
|
||||
partition := &block.Partition{
|
||||
Name: flags.PartitionName,
|
||||
UUID: flags.PartitionUUID,
|
||||
FilesystemLabel: flags.PartitionLabel,
|
||||
}
|
||||
|
||||
// Log partition information
|
||||
logger.Debugf("Partition details:")
|
||||
logger.Debugf(" Name: %s", partition.Name)
|
||||
logger.Debugf(" UUID: %s", partition.UUID)
|
||||
logger.Debugf(" Label: %s", partition.FilesystemLabel)
|
||||
logger.Debugf(" Attempts: %d", flags.Attempts)
|
||||
|
||||
// Get the passphrase using the same backend logic as the plugin
|
||||
fmt.Fprintf(os.Stderr, "Requesting passphrase for partition %s (UUID: %s, Label: %s)...\n",
|
||||
flags.PartitionName, flags.PartitionUUID, flags.PartitionLabel)
|
||||
|
||||
passphrase, err := c.GetPassphrase(partition, flags.Attempts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting passphrase: %w", err)
|
||||
}
|
||||
|
||||
// Output the passphrase to stdout (this is what tools expect)
|
||||
fmt.Print(passphrase)
|
||||
fmt.Fprintf(os.Stderr, "\nPassphrase retrieved successfully\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// runPluginMode handles plugin event commands
|
||||
func runPluginMode() error {
|
||||
// In plugin mode, use quiet=true to log to file instead of console
|
||||
// Log level depends on debug flag, write logs to /var/log/kairos/kcrypt-discovery-challenger.log
|
||||
var logLevel string
|
||||
if debug {
|
||||
logLevel = "debug"
|
||||
} else {
|
||||
logLevel = "error"
|
||||
}
|
||||
|
||||
logger := types.NewKairosLogger("kcrypt-discovery-challenger", logLevel, true)
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
err = c.Start()
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting plugin: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createClientWithOverrides creates a client and applies CLI flag overrides to the config
|
||||
func createClientWithOverrides(serverURL string, enableMDNS bool, certificate string, logger types.KairosLogger) (*client.Client, error) {
|
||||
// Start with the default config from files and pass the logger
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Log the original configuration values
|
||||
logger.Debugf("Original configuration:")
|
||||
logger.Debugf(" Server: %s", c.Config.Kcrypt.Challenger.Server)
|
||||
logger.Debugf(" MDNS: %t", c.Config.Kcrypt.Challenger.MDNS)
|
||||
logger.Debugf(" Certificate: %s", maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
// Apply CLI overrides if provided
|
||||
if serverURL != "" {
|
||||
logger.Debugf("Overriding server URL: %s -> %s", c.Config.Kcrypt.Challenger.Server, serverURL)
|
||||
c.Config.Kcrypt.Challenger.Server = serverURL
|
||||
}
|
||||
|
||||
// For boolean flags, we can directly use the value since Cobra handles it properly
|
||||
if enableMDNS {
|
||||
logger.Debugf("Overriding MDNS setting: %t -> %t", c.Config.Kcrypt.Challenger.MDNS, enableMDNS)
|
||||
c.Config.Kcrypt.Challenger.MDNS = enableMDNS
|
||||
}
|
||||
|
||||
if certificate != "" {
|
||||
logger.Debugf("Overriding certificate: %s -> %s",
|
||||
maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate),
|
||||
maskSensitiveString(certificate))
|
||||
c.Config.Kcrypt.Challenger.Certificate = certificate
|
||||
}
|
||||
|
||||
// Log the final configuration values
|
||||
logger.Debugf("Final configuration:")
|
||||
logger.Debugf(" Server: %s", c.Config.Kcrypt.Challenger.Server)
|
||||
logger.Debugf(" MDNS: %t", c.Config.Kcrypt.Challenger.MDNS)
|
||||
logger.Debugf(" Certificate: %s", maskSensitiveString(c.Config.Kcrypt.Challenger.Certificate))
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// runCleanup handles the cleanup subcommand - TPM NV memory cleanup
|
||||
func runCleanup(nvIndex, tpmDevice string, skipConfirmation bool) error {
|
||||
// Create logger based on debug flag
|
||||
var logger types.KairosLogger
|
||||
if debug {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "debug", false)
|
||||
logger.Debugf("Debug mode enabled for TPM NV cleanup")
|
||||
} else {
|
||||
logger = types.NewKairosLogger("kcrypt-discovery-challenger", "error", false)
|
||||
}
|
||||
|
||||
// Load configuration to get defaults if flags not provided
|
||||
var config client.Config
|
||||
c, err := client.NewClientWithLogger(logger)
|
||||
if err != nil {
|
||||
logger.Debugf("Warning: Could not load configuration: %v", err)
|
||||
// Continue with defaults - not a fatal error
|
||||
} else {
|
||||
config = c.Config
|
||||
}
|
||||
|
||||
// Determine NV index to clean up (follow same pattern as localPass/genAndStore)
|
||||
targetIndex := nvIndex
|
||||
if targetIndex == "" {
|
||||
// First check config, then fall back to the same default used by the local pass flow
|
||||
if config.Kcrypt.Challenger.NVIndex != "" {
|
||||
targetIndex = config.Kcrypt.Challenger.NVIndex
|
||||
} else {
|
||||
targetIndex = client.DefaultNVIndex
|
||||
}
|
||||
}
|
||||
|
||||
// Determine TPM device
|
||||
targetDevice := tpmDevice
|
||||
if targetDevice == "" && config.Kcrypt.Challenger.TPMDevice != "" {
|
||||
targetDevice = config.Kcrypt.Challenger.TPMDevice
|
||||
}
|
||||
|
||||
logger.Debugf("Cleaning up TPM NV index: %s", targetIndex)
|
||||
if targetDevice != "" {
|
||||
logger.Debugf("Using TPM device: %s", targetDevice)
|
||||
}
|
||||
|
||||
// Check if the NV index exists first
|
||||
opts := []tpm.TPMOption{tpm.WithIndex(targetIndex)}
|
||||
if targetDevice != "" {
|
||||
opts = append(opts, tpm.WithDevice(targetDevice))
|
||||
}
|
||||
|
||||
// Try to read from the index to see if it exists
|
||||
logger.Debugf("Checking if NV index %s exists", targetIndex)
|
||||
_, err = tpm.ReadBlob(opts...)
|
||||
if err != nil {
|
||||
// If we can't read it, it might not exist or be empty
|
||||
logger.Debugf("NV index %s appears to be empty or non-existent: %v", targetIndex, err)
|
||||
fmt.Printf("NV index %s appears to be empty or does not exist\n", targetIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Confirmation prompt with warning
|
||||
if !skipConfirmation {
|
||||
fmt.Printf("\n⚠️ WARNING: You are about to delete TPM NV index %s\n", targetIndex)
|
||||
fmt.Printf("⚠️ If this index contains your disk encryption passphrase, your encrypted disk will become UNBOOTABLE!\n")
|
||||
fmt.Printf("⚠️ This action CANNOT be undone.\n\n")
|
||||
fmt.Printf("Are you sure you want to continue? (type 'yes' to confirm): ")
|
||||
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
scanner.Scan()
|
||||
response := strings.TrimSpace(strings.ToLower(scanner.Text()))
|
||||
|
||||
if response != "yes" {
|
||||
fmt.Printf("Cleanup cancelled.\n")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Use native Go TPM library to undefine the NV space
|
||||
logger.Debugf("Using native TPM library to undefine NV index")
|
||||
fmt.Printf("Cleaning up TPM NV index %s...\n", targetIndex)
|
||||
|
||||
err = tpm.UndefineBlob(opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to undefine NV index %s: %w", targetIndex, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully cleaned up NV index %s\n", targetIndex)
|
||||
logger.Debugf("Successfully undefined NV index %s", targetIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// maskSensitiveString masks certificate paths/content for logging
|
||||
func maskSensitiveString(s string) string {
|
||||
if s == "" {
|
||||
return "<empty>"
|
||||
}
|
||||
if len(s) <= 10 {
|
||||
return strings.Repeat("*", len(s))
|
||||
}
|
||||
// Show first 3 and last 3 characters with * in between
|
||||
return s[:3] + strings.Repeat("*", len(s)-6) + s[len(s)-3:]
|
||||
}
|
||||
|
@@ -37,6 +37,40 @@ spec:
|
||||
properties:
|
||||
TPMHash:
|
||||
type: string
|
||||
attestation:
|
||||
description: AttestationSpec defines TPM attestation data for TOFU
|
||||
enrollment and verification
|
||||
properties:
|
||||
akPublicKey:
|
||||
description: AKPublicKey stores the Attestation Key public key
|
||||
in PEM format
|
||||
type: string
|
||||
ekPublicKey:
|
||||
description: EKPublicKey stores the Endorsement Key public key
|
||||
in PEM format
|
||||
type: string
|
||||
enrolledAt:
|
||||
description: EnrolledAt timestamp when this TPM was first enrolled
|
||||
format: date-time
|
||||
type: string
|
||||
lastVerifiedAt:
|
||||
description: LastVerifiedAt timestamp of the last successful attestation
|
||||
format: date-time
|
||||
type: string
|
||||
pcrValues:
|
||||
description: PCRValues stores the expected PCR values for boot
|
||||
state verification
|
||||
properties:
|
||||
pcrs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: 'PCRs is a flexible map of PCR index (as string)
|
||||
to PCR value (hex-encoded) Example: {"0": "a1b2c3...", "7":
|
||||
"d4e5f6...", "11": "g7h8i9..."} This allows for any combination
|
||||
of PCRs without hardcoding specific indices'
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
partitions:
|
||||
items:
|
||||
description: 'PartitionSpec defines a Partition. A partition can
|
||||
|
@@ -25,11 +25,6 @@ bases:
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
#- manager_config_patch.yaml
|
||||
|
@@ -1,39 +0,0 @@
|
||||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: manager
|
||||
args:
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
@@ -25,10 +25,6 @@ bases:
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
- pull.yaml
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
|
@@ -1,39 +0,0 @@
|
||||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: manager
|
||||
args:
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
@@ -9,4 +9,6 @@ spec:
|
||||
containers:
|
||||
- name: manager
|
||||
imagePullPolicy: IfNotPresent
|
||||
- name: kube-rbac-proxy
|
||||
imagePullPolicy: IfNotPresent
|
||||
|
||||
|
@@ -34,10 +34,41 @@ spec:
|
||||
# seccompProfile:
|
||||
# type: RuntimeDefault
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- command:
|
||||
- /manager
|
||||
args:
|
||||
- --leader-elect
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
||||
- "--namespace=$(POD_NAMESPACE)"
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: controller:latest
|
||||
name: manager
|
||||
securityContext:
|
||||
|
@@ -69,8 +69,7 @@ var _ = BeforeSuite(func() {
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(k8sClient).NotTo(BeNil())
|
||||
|
||||
}, 60)
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
|
64
examples/cli-usage.sh
Executable file
64
examples/cli-usage.sh
Executable file
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Example script demonstrating the new CLI interface for kcrypt-challenger
|
||||
# This makes testing and debugging much easier than using the plugin interface
|
||||
|
||||
echo "=== kcrypt-challenger CLI Examples ==="
|
||||
echo
|
||||
|
||||
# Build the binary if it doesn't exist
|
||||
if [ ! -f "./kcrypt-discovery-challenger" ]; then
|
||||
echo "Building kcrypt-discovery-challenger..."
|
||||
go build -o kcrypt-discovery-challenger ./cmd/discovery/
|
||||
echo
|
||||
fi
|
||||
|
||||
echo "1. Show help:"
|
||||
./kcrypt-discovery-challenger --help
|
||||
echo
|
||||
|
||||
echo "2. Show version:"
|
||||
./kcrypt-discovery-challenger --version
|
||||
echo
|
||||
|
||||
echo "3. Test CLI mode with example parameters (will fail without server, but shows the flow):"
|
||||
echo " Command: ./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --attempts=1"
|
||||
echo " Expected: Error connecting to server, but flow detection should work"
|
||||
echo
|
||||
./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --attempts=1 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "4. Test CLI mode with configuration overrides:"
|
||||
echo " Command: ./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --challenger-server=https://custom-server.com:8082 --mdns=true --attempts=1"
|
||||
echo " Expected: Same error but with custom server configuration"
|
||||
echo
|
||||
./kcrypt-discovery-challenger --partition-name=/dev/sda2 --partition-uuid=12345-abcde --partition-label=encrypted-data --challenger-server=https://custom-server.com:8082 --mdns=true --attempts=1 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "4. Check the log file for flow detection:"
|
||||
if [ -f "/tmp/kcrypt-challenger-client.log" ]; then
|
||||
echo " Log contents:"
|
||||
cat /tmp/kcrypt-challenger-client.log
|
||||
echo
|
||||
else
|
||||
echo " No log file found"
|
||||
fi
|
||||
|
||||
echo "5. Test plugin mode (for comparison):"
|
||||
echo " Command: echo '{\"data\": \"{\\\"name\\\": \\\"/dev/sda2\\\", \\\"uuid\\\": \\\"12345-abcde\\\", \\\"filesystemLabel\\\": \\\"encrypted-data\\\"}\"}' | ./kcrypt-discovery-challenger discovery.password"
|
||||
echo " Expected: Same behavior as CLI mode"
|
||||
echo
|
||||
echo '{"data": "{\"name\": \"/dev/sda2\", \"uuid\": \"12345-abcde\", \"filesystemLabel\": \"encrypted-data\"}"}' | ./kcrypt-discovery-challenger discovery.password 2>&1 || true
|
||||
echo
|
||||
|
||||
echo "=== Summary ==="
|
||||
echo "✅ CLI interface successfully created"
|
||||
echo "✅ Full compatibility with plugin mode maintained"
|
||||
echo "✅ Same backend logic used for both interfaces"
|
||||
echo "✅ Flow detection works in both modes"
|
||||
echo ""
|
||||
echo "Benefits:"
|
||||
echo "- Much easier testing during development"
|
||||
echo "- Can be used for debugging in production"
|
||||
echo "- Clear command-line interface with help and examples"
|
||||
echo "- Maintains full compatibility with kcrypt integration"
|
21
go.mod
21
go.mod
@@ -2,8 +2,13 @@ module github.com/kairos-io/kairos-challenger
|
||||
|
||||
go 1.25
|
||||
|
||||
replace github.com/kairos-io/tpm-helpers => github.com/kairos-io/tpm-helpers v0.0.0-20250924104130-49f51e390ef3
|
||||
|
||||
//replace github.com/kairos-io/tpm-helpers => /home/dimitris/workspace/kairos/tpm-helpers
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/google/go-attestation v0.5.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hashicorp/mdns v1.0.6
|
||||
@@ -15,8 +20,8 @@ require (
|
||||
github.com/mudler/yip v1.18.0
|
||||
github.com/onsi/ginkgo/v2 v2.25.3
|
||||
github.com/onsi/gomega v1.38.2
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spectrocloud/peg v0.0.0-20240405075800-c5da7125e30f
|
||||
github.com/spf13/cobra v1.10.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.27.2
|
||||
k8s.io/apimachinery v0.27.4
|
||||
@@ -34,7 +39,6 @@ require (
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.9 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/avast/retry-go v3.0.0+incompatible // indirect
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@@ -65,7 +69,6 @@ require (
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/folbricht/tpmk v0.1.2-0.20230104073416-f20b20c289d7 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.4 // indirect
|
||||
@@ -79,11 +82,11 @@ require (
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/certificate-transparency-go v1.1.4 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-configfs-tsm v0.3.3 // indirect
|
||||
github.com/google/go-containerregistry v0.20.6 // indirect
|
||||
github.com/google/go-tpm v0.3.3 // indirect
|
||||
github.com/google/go-tpm-tools v0.3.10 // indirect
|
||||
github.com/google/go-tpm v0.9.1 // indirect
|
||||
github.com/google/go-tpm-tools v0.4.4 // indirect
|
||||
github.com/google/go-tspi v0.3.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
@@ -93,6 +96,7 @@ require (
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/ipfs/go-log v1.0.5 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.5.1 // indirect
|
||||
github.com/itchyny/gojq v0.12.17 // indirect
|
||||
@@ -123,6 +127,7 @@ require (
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.20.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
@@ -137,7 +142,7 @@ require (
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twpayne/go-vfs/v4 v4.3.0 // indirect
|
||||
@@ -153,7 +158,7 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -5,6 +5,11 @@
|
||||
package challenger
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-attestation/attest"
|
||||
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -38,7 +43,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -67,7 +72,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("doesn't match a request with an empty field", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).To(BeNil())
|
||||
})
|
||||
})
|
||||
@@ -86,7 +91,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -108,7 +113,7 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns the sealed volume data", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).ToNot(BeNil())
|
||||
Expect(volumeData.Quarantined).To(BeFalse())
|
||||
Expect(volumeData.SecretName).To(Equal("the_secret"))
|
||||
@@ -130,11 +135,473 @@ var _ = Describe("challenger", func() {
|
||||
})
|
||||
|
||||
It("returns nil sealedVolumeData", func() {
|
||||
volumeData := findVolumeFor(requestData, volumeList)
|
||||
volumeData, _ := findVolumeFor(requestData, volumeList)
|
||||
Expect(volumeData).To(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Selective Enrollment Mode", func() {
|
||||
var logger logr.Logger
|
||||
|
||||
BeforeEach(func() {
|
||||
logger = logr.Discard()
|
||||
})
|
||||
|
||||
Describe("verifyAKMatch with selective enrollment", func() {
|
||||
var currentAK *attest.AttestationParameters
|
||||
var expectedAKPEM string
|
||||
const mockAKPublicKey = "mock-ak-public-key"
|
||||
|
||||
BeforeEach(func() {
|
||||
// Mock current AK parameters - in real implementation this would come from TPM
|
||||
currentAK = &attest.AttestationParameters{
|
||||
Public: []byte(mockAKPublicKey),
|
||||
UseTCSDActivationFormat: false,
|
||||
CreateData: []byte("mock-create-data"),
|
||||
CreateAttestation: []byte("mock-create-attestation"),
|
||||
CreateSignature: []byte("mock-create-signature"),
|
||||
}
|
||||
|
||||
// Generate the expected PEM encoding from the plain text constant
|
||||
var err error
|
||||
expectedAKPEM, err = encodeAKToPEM(currentAK)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
When("stored AK is empty (re-enrollment mode)", func() {
|
||||
It("should store the current AK value during re-enrollment", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Empty = re-enrollment mode
|
||||
}
|
||||
|
||||
// Before re-enrollment: AK should be empty
|
||||
Expect(attestation.AKPublicKey).To(Equal(""))
|
||||
|
||||
// Re-enrollment should store the current AK
|
||||
err := updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// After re-enrollment: AK should contain the exact expected PEM value
|
||||
Expect(attestation.AKPublicKey).To(Equal(expectedAKPEM))
|
||||
})
|
||||
|
||||
It("should accept any AK, store it during re-enrollment, then enforce exact match", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Start in re-enrollment mode
|
||||
}
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
|
||||
// Step 1: Verification should pass with any AK (re-enrollment mode)
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 2: Re-enroll - store the AK
|
||||
err = updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 3: Now we should be in enforcement mode - same AK should pass
|
||||
err = verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Different AK should now fail (enforcement mode)
|
||||
differentAK := &attest.AttestationParameters{
|
||||
Public: []byte("different-ak-key"),
|
||||
}
|
||||
err = verifyAKMatchSelective(sealedVolume, differentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
When("stored AK is set (enforcement mode)", func() {
|
||||
It("should enforce exact match", func() {
|
||||
// Create a specific AK PEM that won't match our mock
|
||||
storedAKPEM := "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtest\n-----END PUBLIC KEY-----"
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: storedAKPEM,
|
||||
}
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
When("no attestation data exists", func() {
|
||||
It("should return error", func() {
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: nil,
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("no attestation data"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("verifyPCRValuesSelective", func() {
|
||||
var currentPCRs *keyserverv1alpha1.PCRValues
|
||||
const expectedPCR0 = "abc123def456"
|
||||
const expectedPCR7 = "ghi789jkl012"
|
||||
const expectedPCR11 = "mno345pqr678"
|
||||
|
||||
BeforeEach(func() {
|
||||
currentPCRs = &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": expectedPCR0,
|
||||
"7": expectedPCR7,
|
||||
"11": expectedPCR11,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
When("stored PCR values are empty (re-enrollment mode)", func() {
|
||||
It("should accept any PCR values during verification", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "", // Empty = re-enrollment mode
|
||||
"11": "", // Empty = re-enrollment mode
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
It("should store the current PCR values during re-enrollment", func() {
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
PCRValues: &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "", // Empty = re-enrollment mode
|
||||
"11": "", // Empty = re-enrollment mode
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Before re-enrollment: PCRs should be empty
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal(""))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal(""))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal(""))
|
||||
|
||||
// Re-enrollment should store the current PCR values
|
||||
err := updateAttestationDataSelective(attestation, nil, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// After re-enrollment: PCRs should be stored with exact expected values
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal(expectedPCR0))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal(expectedPCR7))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal(expectedPCR11))
|
||||
})
|
||||
|
||||
It("should transition from re-enrollment mode to enforcement mode", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Start in re-enrollment mode
|
||||
},
|
||||
}
|
||||
|
||||
// Create a limited current PCR set (only PCR0) to test selective enrollment
|
||||
limitedCurrentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": expectedPCR0, // Only provide PCR0
|
||||
},
|
||||
}
|
||||
|
||||
// Step 1: Should accept any PCR values (re-enrollment mode)
|
||||
err := verifyPCRValuesSelective(storedPCRs, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 2: Re-enroll - store the PCR value (should only update the empty PCR0)
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
PCRValues: storedPCRs,
|
||||
}
|
||||
err = updateAttestationDataSelective(attestation, nil, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Verify PCR0 was enrolled and no other PCRs were added
|
||||
Expect(storedPCRs.PCRs["0"]).To(Equal(expectedPCR0))
|
||||
Expect(storedPCRs.PCRs).To(HaveLen(1)) // Should still only have PCR0
|
||||
|
||||
// Step 3: Now should be in enforcement mode - same PCR should pass
|
||||
err = verifyPCRValuesSelective(storedPCRs, limitedCurrentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Different PCR should now fail (enforcement mode)
|
||||
differentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "different_value",
|
||||
},
|
||||
}
|
||||
err = verifyPCRValuesSelective(storedPCRs, differentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR0 changed"))
|
||||
})
|
||||
})
|
||||
|
||||
When("stored PCR values are set (enforcement mode)", func() {
|
||||
It("should enforce exact match for set values", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Matches current
|
||||
"7": "different_value", // Different from current
|
||||
"11": "mno345pqr678", // Matches current
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR7 changed"))
|
||||
})
|
||||
|
||||
It("should pass when all set values match", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Matches current
|
||||
"7": "ghi789jkl012", // Matches current
|
||||
"11": "mno345pqr678", // Matches current
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("PCR fields are omitted (skip verification)", func() {
|
||||
It("should skip verification for omitted PCRs entirely", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456", // Present and matches
|
||||
"7": "ghi789jkl012", // Present and matches
|
||||
// "11" is omitted entirely = skip verification
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("mixed selective and enforcement mode", func() {
|
||||
It("should handle combination of empty, set, and omitted PCRs", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = re-enrollment mode
|
||||
"7": "ghi789jkl012", // Set = enforcement mode (matches)
|
||||
"14": "any_value", // Set but PCR14 not in current (should fail)
|
||||
// "11" omitted = skip verification
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, currentPCRs, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("PCR14"))
|
||||
})
|
||||
})
|
||||
|
||||
When("no stored PCR values exist", func() {
|
||||
It("should accept any current PCR values", func() {
|
||||
err := verifyPCRValuesSelective(nil, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
When("no current PCR values provided", func() {
|
||||
It("should pass if no stored values either", func() {
|
||||
err := verifyPCRValuesSelective(nil, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
})
|
||||
|
||||
It("should fail if stored values expect specific PCRs", func() {
|
||||
storedPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "abc123def456",
|
||||
},
|
||||
}
|
||||
|
||||
err := verifyPCRValuesSelective(storedPCRs, nil, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("no current PCR values"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("updateAttestationData for selective enrollment", func() {
|
||||
It("should update empty fields with current values", func() {
|
||||
currentAK := &attest.AttestationParameters{
|
||||
Public: []byte("new-ak-public-key"),
|
||||
}
|
||||
currentPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "new_pcr0_value",
|
||||
"7": "new_pcr7_value",
|
||||
"11": "new_pcr11_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: "", // Empty = should be updated
|
||||
PCRValues: &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "", // Empty = should be updated
|
||||
"7": "fixed_pcr7_value", // Set = should NOT be updated
|
||||
"11": "", // Empty = should be updated
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := updateAttestationDataSelective(attestation, currentAK, currentPCRs, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// AK should be updated
|
||||
Expect(attestation.AKPublicKey).ToNot(BeEmpty())
|
||||
|
||||
// PCR0 should be updated (was empty)
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal("new_pcr0_value"))
|
||||
|
||||
// PCR7 should NOT be updated (was set)
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal("fixed_pcr7_value"))
|
||||
|
||||
// PCR11 should be updated (was empty)
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("new_pcr11_value"))
|
||||
})
|
||||
|
||||
It("should demonstrate AK re-enrollment workflow", func() {
|
||||
// Step 1: Start with empty AK (re-enrollment mode)
|
||||
originalAK := ""
|
||||
attestation := &keyserverv1alpha1.AttestationSpec{
|
||||
AKPublicKey: originalAK, // Empty = re-enrollment mode
|
||||
}
|
||||
|
||||
// Step 2: Current AK from client
|
||||
currentAK := &attest.AttestationParameters{
|
||||
Public: []byte("client-provided-ak-key"),
|
||||
}
|
||||
|
||||
// Step 3: Verification should pass (empty stored AK accepts any)
|
||||
sealedVolume := &keyserverv1alpha1.SealedVolume{
|
||||
Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
Attestation: attestation,
|
||||
},
|
||||
}
|
||||
err := verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 4: Update should store the new AK (this is the re-enrollment)
|
||||
err = updateAttestationDataSelective(attestation, currentAK, nil, logger)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Step 5: Verify the AK was actually enrolled (stored)
|
||||
Expect(attestation.AKPublicKey).ToNot(BeEmpty())
|
||||
Expect(attestation.AKPublicKey).ToNot(Equal(originalAK))
|
||||
|
||||
// Step 6: Future verification should now require exact match
|
||||
err = verifyAKMatchSelective(sealedVolume, currentAK, logger)
|
||||
Expect(err).To(BeNil()) // Should still pass with same AK
|
||||
|
||||
// Step 7: Different AK should now fail (enforcement mode)
|
||||
differentAK := &attest.AttestationParameters{
|
||||
Public: []byte("different-ak-key"),
|
||||
}
|
||||
err = verifyAKMatchSelective(sealedVolume, differentAK, logger)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("AK public key does not match"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Initial TOFU Enrollment behavior", func() {
|
||||
It("should store ALL provided PCRs during initial enrollment", func() {
|
||||
clientPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"0": "pcr0_value",
|
||||
"1": "pcr1_value",
|
||||
"2": "pcr2_value",
|
||||
"7": "pcr7_value",
|
||||
"11": "pcr11_value",
|
||||
"14": "pcr14_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := createInitialTOFUAttestation(nil, clientPCRs, logger)
|
||||
|
||||
// All provided PCRs should be stored
|
||||
Expect(attestation.PCRValues).ToNot(BeNil())
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveLen(6))
|
||||
Expect(attestation.PCRValues.PCRs["0"]).To(Equal("pcr0_value"))
|
||||
Expect(attestation.PCRValues.PCRs["1"]).To(Equal("pcr1_value"))
|
||||
Expect(attestation.PCRValues.PCRs["2"]).To(Equal("pcr2_value"))
|
||||
Expect(attestation.PCRValues.PCRs["7"]).To(Equal("pcr7_value"))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("pcr11_value"))
|
||||
Expect(attestation.PCRValues.PCRs["14"]).To(Equal("pcr14_value"))
|
||||
})
|
||||
|
||||
It("should not filter or omit any PCRs during TOFU", func() {
|
||||
// Test that even "sensitive" PCRs like PCR11 are stored
|
||||
clientPCRs := &keyserverv1alpha1.PCRValues{
|
||||
PCRs: map[string]string{
|
||||
"11": "kernel_pcr_value", // Previously filtered out
|
||||
"12": "other_pcr_value",
|
||||
},
|
||||
}
|
||||
|
||||
attestation := createInitialTOFUAttestation(nil, clientPCRs, logger)
|
||||
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveKey("11"))
|
||||
Expect(attestation.PCRValues.PCRs).To(HaveKey("12"))
|
||||
Expect(attestation.PCRValues.PCRs["11"]).To(Equal("kernel_pcr_value"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("handleTPMAttestation functions", func() {
|
||||
Describe("establishAttestationConnection", func() {
|
||||
var mockResponseWriter *httptest.ResponseRecorder
|
||||
var mockRequest *http.Request
|
||||
var logger logr.Logger
|
||||
|
||||
BeforeEach(func() {
|
||||
logger = logr.Discard()
|
||||
mockResponseWriter = httptest.NewRecorder()
|
||||
mockRequest = httptest.NewRequest("GET", "/test", nil)
|
||||
|
||||
// Set partition headers
|
||||
mockRequest.Header.Set("label", "COS_PERSISTENT")
|
||||
mockRequest.Header.Set("name", "/dev/sda1")
|
||||
mockRequest.Header.Set("uuid", "test-uuid-123")
|
||||
})
|
||||
|
||||
It("should return error when WebSocket upgrade fails", func() {
|
||||
// This test checks the error behavior when WebSocket upgrade fails
|
||||
conn, partition, err := establishAttestationConnection(mockResponseWriter, mockRequest, logger)
|
||||
|
||||
// WebSocket upgrade should fail with regular HTTP request
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("upgrade"))
|
||||
Expect(conn).To(BeNil())
|
||||
|
||||
// When upgrade fails, partition info is not extracted (function returns early)
|
||||
Expect(partition.Label).To(Equal(""))
|
||||
Expect(partition.DeviceName).To(Equal(""))
|
||||
Expect(partition.UUID).To(Equal(""))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
func volumeListWithPartitionSpec(partitionSpec keyserverv1alpha1.PartitionSpec) *keyserverv1alpha1.SealedVolumeList {
|
||||
@@ -151,3 +618,25 @@ func volumeListWithPartitionSpec(partitionSpec keyserverv1alpha1.PartitionSpec)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func volumeListWithAttestationSpec(tpmHash string, attestation *keyserverv1alpha1.AttestationSpec) *keyserverv1alpha1.SealedVolumeList {
|
||||
return &keyserverv1alpha1.SealedVolumeList{
|
||||
Items: []keyserverv1alpha1.SealedVolume{
|
||||
{Spec: keyserverv1alpha1.SealedVolumeSpec{
|
||||
TPMHash: tpmHash,
|
||||
Partitions: []keyserverv1alpha1.PartitionSpec{
|
||||
{
|
||||
Label: "COS_PERSISTENT",
|
||||
Secret: &keyserverv1alpha1.SecretSpec{
|
||||
Name: "test-secret",
|
||||
Path: "pass",
|
||||
},
|
||||
},
|
||||
},
|
||||
Quarantined: false,
|
||||
Attestation: attestation,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@@ -2,3 +2,4 @@ package constants
|
||||
|
||||
const TPMSecret = "tpm"
|
||||
const GeneratedByKey = "generated_by"
|
||||
const AKBlobFile = "/etc/kairos/ak.blob"
|
||||
|
451
tests/advanced_scenarios_test.go
Normal file
451
tests/advanced_scenarios_test.go
Normal file
@@ -0,0 +1,451 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/spectrocloud/peg/matcher"
|
||||
)
|
||||
|
||||
// Advanced scenarios that test complex operational workflows,
|
||||
// performance aspects, and edge cases
|
||||
|
||||
var _ = Describe("Advanced Scenarios E2E Tests", func() {
|
||||
var config string
|
||||
var vmOpts VMOptions
|
||||
var expectedInstallationSuccess bool
|
||||
var testVM VM
|
||||
var tpmHash string
|
||||
|
||||
// VM lifecycle management for reuse optimization
|
||||
var vmInitialized bool
|
||||
|
||||
BeforeEach(func() {
|
||||
expectedInstallationSuccess = true
|
||||
vmOpts = DefaultVMOptions()
|
||||
vmInitialized = false
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if vmInitialized {
|
||||
testVM.GatherLog("/run/immucore/immucore.log")
|
||||
}
|
||||
})
|
||||
|
||||
// Local helper functions using common suite functions
|
||||
ensureVMRunning := func() {
|
||||
if !vmInitialized {
|
||||
By("Starting VM for advanced scenarios tests")
|
||||
_, testVM = startVM(vmOpts)
|
||||
fmt.Printf("\nadvanced scenarios VM.StateDir = %+v\n", testVM.StateDir)
|
||||
testVM.EventuallyConnects(1200)
|
||||
vmInitialized = true
|
||||
}
|
||||
}
|
||||
|
||||
installKairosWithConfig := func(config string) {
|
||||
installKairosWithConfigAdvanced(testVM, config, expectedInstallationSuccess)
|
||||
}
|
||||
|
||||
// Cleanup VM at the very end
|
||||
var _ = AfterSuite(func() {
|
||||
if vmInitialized {
|
||||
cleanupVM(testVM)
|
||||
}
|
||||
})
|
||||
|
||||
When("Testing Multi-Partition Support", Label("remote-multi-partition"), func() {
|
||||
It("should handle multiple partitions on same TPM with different encryption keys", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Get TPM hash
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Step 2: Create SealedVolume with multiple partitions
|
||||
createMultiPartitionSealedVolume(tpmHash, []string{"COS_PERSISTENT", "COS_OEM"})
|
||||
|
||||
// Step 3: Configure Kairos with multiple encrypted partitions
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
- COS_OEM
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
|
||||
// Step 4: Verify both partitions are encrypted
|
||||
By("Verifying both partitions are encrypted")
|
||||
out, err := testVM.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"oem\""), out)
|
||||
|
||||
// Step 5: Verify separate secrets were created for each partition
|
||||
By("Verifying separate secrets were created for each partition")
|
||||
Eventually(func() bool {
|
||||
return secretExistsInNamespace(fmt.Sprintf("%s-cos-persistent", tpmHash), "default") &&
|
||||
secretExistsInNamespace(fmt.Sprintf("%s-cos-oem", tpmHash), "default")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Namespace Isolation", Label("remote-namespace-isolation"), func() {
|
||||
It("should properly isolate SealedVolumes in different namespaces", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Get TPM hash
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Step 2: Create SealedVolumes in different namespaces
|
||||
createSealedVolumeInNamespace(tpmHash, "test-ns-1")
|
||||
createSealedVolumeInNamespace(tpmHash, "test-ns-2")
|
||||
|
||||
// Step 3: Initial setup with default namespace
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
|
||||
// Should fail initially because no SealedVolume in default namespace (test via CLI)
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Step 4: Create SealedVolume in default namespace
|
||||
By("Creating SealedVolume in default namespace")
|
||||
createSealedVolumeInNamespace(tpmHash, "default")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should now work via CLI
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
|
||||
// Step 5: Verify secrets are created in appropriate namespaces
|
||||
By("Verifying namespace isolation of secrets")
|
||||
Eventually(func() bool {
|
||||
return secretExistsInNamespace(fmt.Sprintf("%s-cos-persistent", tpmHash), "default")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Secrets should not cross namespace boundaries
|
||||
Expect(secretExistsInNamespace(fmt.Sprintf("%s-cos-persistent", tpmHash), "test-ns-1")).To(BeFalse())
|
||||
Expect(secretExistsInNamespace(fmt.Sprintf("%s-cos-persistent", tpmHash), "test-ns-2")).To(BeFalse())
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Network Resilience", Label("remote-network-resilience"), func() {
|
||||
It("should handle network interruptions gracefully", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Initial setup
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Create SealedVolume for enrollment
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, tpmHash, tpmHash))
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
timeout: 30s
|
||||
retry_attempts: 3
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Simulate network interruption during boot
|
||||
By("Testing resilience to temporary network outage")
|
||||
|
||||
// We can't easily simulate network interruption in the current test setup,
|
||||
// but we can verify the timeout and retry configuration works by checking logs
|
||||
out, err := testVM.Sudo("journalctl -u kcrypt* --no-pager")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Should see evidence of successful KMS communication
|
||||
Expect(out).To(ContainSubstring("kcrypt"))
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Performance Under Load", Label("remote-performance"), func() {
|
||||
It("should handle multiple concurrent authentication requests", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Setup multiple encrypted partitions to test concurrent access
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
createMultiPartitionSealedVolume(tpmHash, []string{"COS_PERSISTENT", "COS_OEM"})
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
- COS_OEM
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
|
||||
// Step 2: Verify both partitions were decrypted successfully
|
||||
By("Verifying concurrent partition decryption")
|
||||
out, err := testVM.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"oem\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_PERSISTENT\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_OEM\""), out)
|
||||
|
||||
// Step 3: Test multiple rapid reboots to stress test the system
|
||||
By("Testing system stability under multiple rapid authentication cycles")
|
||||
for i := 0; i < 3; i++ {
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
time.Sleep(2 * time.Second) // Brief pause between cycles
|
||||
}
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Large PCR Configuration", Label("remote-large-pcr"), func() {
|
||||
It("should handle attestation with many PCRs", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Create SealedVolume with extensive PCR configuration
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Create complex PCR configuration
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false
|
||||
attestation:
|
||||
pcrValues:
|
||||
pcrs:
|
||||
"0": "" # BIOS/UEFI - re-enroll
|
||||
"1": "" # Platform Configuration - re-enroll
|
||||
"2": "" # Option ROM Code - re-enroll
|
||||
"3": "" # Option ROM Configuration - re-enroll
|
||||
"4": "" # MBR/GPT - re-enroll
|
||||
"5": "" # Boot Manager - re-enroll
|
||||
"6": "" # Platform State - re-enroll
|
||||
"7": "" # Secure Boot State - re-enroll
|
||||
"8": "" # Command Line - re-enroll
|
||||
"9": "" # initrd - re-enroll
|
||||
"10": "" # IMA - re-enroll
|
||||
# PCR 11 omitted - will be ignored
|
||||
"12": "" # Kernel Command Line - re-enroll
|
||||
"13": "" # sysvinit - re-enroll
|
||||
"14": "" # systemd - re-enroll
|
||||
"15": "" # System Integrity - re-enroll`, tpmHash, tpmHash)
|
||||
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Verify that many PCRs were successfully enrolled
|
||||
By("Verifying extensive PCR enrollment")
|
||||
Eventually(func() int {
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", tpmHash, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Count non-empty PCR values
|
||||
lines := strings.Split(string(out), "\n")
|
||||
enrolledPCRs := 0
|
||||
for _, line := range lines {
|
||||
if strings.Contains(line, "\":") &&
|
||||
!strings.Contains(line, "\": \"\"") &&
|
||||
strings.Contains(line, "\"") {
|
||||
enrolledPCRs++
|
||||
}
|
||||
}
|
||||
return enrolledPCRs
|
||||
}, 60*time.Second, 10*time.Second).Should(BeNumerically(">=", 10))
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Resource Cleanup", Label("remote-cleanup"), func() {
|
||||
It("should properly cleanup resources when SealedVolumes are deleted", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Create and verify initial setup
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, tpmHash, tpmHash))
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Verify secret was created
|
||||
secretName := fmt.Sprintf("%s-cos-persistent", tpmHash)
|
||||
Eventually(func() bool {
|
||||
return secretExistsInNamespace(secretName, "default")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Step 3: Delete SealedVolume and verify orphaned secret handling
|
||||
By("Testing resource cleanup after SealedVolume deletion")
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Secret should still exist (policy decision - secrets are not auto-deleted)
|
||||
Expect(secretExistsInNamespace(secretName, "default")).To(BeTrue())
|
||||
|
||||
// Step 4: Try to retrieve passphrase without SealedVolume (should fail)
|
||||
By("Testing passphrase retrieval after SealedVolume deletion")
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should fail to get passphrase without SealedVolume
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Step 5: Manual secret cleanup for test hygiene
|
||||
cmd := exec.Command("kubectl", "delete", "secret", secretName, "--ignore-not-found=true")
|
||||
cmd.CombinedOutput()
|
||||
|
||||
})
|
||||
})
|
||||
})
|
@@ -374,19 +374,6 @@ func printInstallationOutput(message string, callerSkip ...int) {
|
||||
Fail(message, callerSkip[0]+1)
|
||||
}
|
||||
|
||||
func kubectlApplyYaml(yamlData string) {
|
||||
yamlFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(yamlFile.Name())
|
||||
|
||||
err = os.WriteFile(yamlFile.Name(), []byte(yamlData), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd := exec.Command("kubectl", "apply", "-f", yamlFile.Name())
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
}
|
||||
|
||||
func getChallengerServerCert() string {
|
||||
cmd := exec.Command(
|
||||
"kubectl", "get", "secret", "-n", "default", "kms-tls",
|
||||
|
485
tests/selective_enrollment_test.go
Normal file
485
tests/selective_enrollment_test.go
Normal file
@@ -0,0 +1,485 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/spectrocloud/peg/matcher"
|
||||
)
|
||||
|
||||
// These tests focus on selective enrollment scenarios and VM reuse optimization
|
||||
// Instead of spinning up a new VM for each test case, we reuse VMs across
|
||||
// sequential scenarios to reduce test execution time.
|
||||
|
||||
var _ = Describe("Selective Enrollment E2E Tests", func() {
|
||||
var config string
|
||||
var vmOpts VMOptions
|
||||
var expectedInstallationSuccess bool
|
||||
var testVM VM
|
||||
var tpmHash string
|
||||
|
||||
// VM lifecycle management for reuse optimization
|
||||
var vmInitialized bool
|
||||
|
||||
BeforeEach(func() {
|
||||
expectedInstallationSuccess = true
|
||||
vmOpts = DefaultVMOptions()
|
||||
vmInitialized = false
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if vmInitialized {
|
||||
testVM.GatherLog("/run/immucore/immucore.log")
|
||||
}
|
||||
})
|
||||
|
||||
// Local helper functions using common suite functions
|
||||
ensureVMRunning := func() {
|
||||
if !vmInitialized {
|
||||
By("Starting VM for selective enrollment tests")
|
||||
_, testVM = startVM(vmOpts)
|
||||
fmt.Printf("\nselective enrollment VM.StateDir = %+v\n", testVM.StateDir)
|
||||
testVM.EventuallyConnects(1200)
|
||||
vmInitialized = true
|
||||
}
|
||||
}
|
||||
|
||||
installKairosWithConfig := func(config string) {
|
||||
installKairosWithConfigAdvanced(testVM, config, expectedInstallationSuccess)
|
||||
}
|
||||
|
||||
// Cleanup VM at the very end
|
||||
var _ = AfterSuite(func() {
|
||||
if vmInitialized {
|
||||
cleanupVM(testVM)
|
||||
}
|
||||
})
|
||||
|
||||
When("Testing Pure TOFU Enrollment Flow", Label("remote-tofu"), func() {
|
||||
It("should perform complete TOFU enrollment and subsequent successful authentications", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Get TPM hash but don't create any SealedVolume (pure TOFU)
|
||||
tpmHash = getTPMHash(testVM)
|
||||
|
||||
// Ensure no pre-existing SealedVolume
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Step 2: Configure Kairos for remote KMS without pre-created SealedVolume
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 3: Verify SealedVolume was auto-created with TOFU enrollment
|
||||
By("Verifying SealedVolume was auto-created with attestation data")
|
||||
Eventually(func() bool {
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", tpmHash, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// Check that attestation data was populated (not empty)
|
||||
return strings.Contains(string(out), "attestation:") &&
|
||||
strings.Contains(string(out), "ekPublicKey:") &&
|
||||
strings.Contains(string(out), "akPublicKey:")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Step 4: Verify secret was created
|
||||
By("Verifying encryption secret was auto-generated")
|
||||
Eventually(func() bool {
|
||||
return secretExists(fmt.Sprintf("%s-cos-persistent", tpmHash))
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Step 5: Test subsequent authentication works
|
||||
By("Testing subsequent authentication with learned attestation data")
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Quarantine Management", Label("remote-quarantine"), func() {
|
||||
It("should handle quarantine, rejection, and recovery flows using the same VM", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Initial enrollment
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash) // Ensure clean state
|
||||
|
||||
// Create SealedVolume for TOFU enrollment
|
||||
createSealedVolumeWithAttestation(tpmHash, nil)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Quarantine the TPM
|
||||
quarantineTPM(tpmHash)
|
||||
|
||||
// Give some time for the change to propagate
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Step 3: Verify quarantined TPM is rejected via CLI (no risky reboot)
|
||||
By("Testing that quarantined TPM is rejected via CLI")
|
||||
|
||||
// Give some time for quarantine to propagate
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should fail to retrieve passphrase when quarantined
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Step 4: Test recovery by unquarantining
|
||||
By("Testing recovery by unquarantining TPM")
|
||||
unquarantineTPM(tpmHash)
|
||||
|
||||
// Give some time for the change to propagate
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should now be able to retrieve passphrase again
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing PCR Management Scenarios", Label("remote-pcr-mgmt"), func() {
|
||||
It("should handle PCR re-enrollment, omission, and mixed states using the same VM", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Initial enrollment with specific PCR enforcement
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Create SealedVolume with specific PCR values enforced
|
||||
attestationConfig := map[string]interface{}{
|
||||
"pcrValues": map[string]string{
|
||||
"0": "specific-pcr0-value", // Will be enforced
|
||||
"7": "", // Will be re-enrolled
|
||||
// PCR 11 omitted - will be ignored
|
||||
},
|
||||
}
|
||||
createSealedVolumeWithAttestation(tpmHash, attestationConfig)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Verify PCR 7 was re-enrolled (updated from empty to actual value)
|
||||
By("Verifying PCR 7 was re-enrolled with actual value")
|
||||
Eventually(func() bool {
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", tpmHash, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// PCR 7 should now have a non-empty value
|
||||
return strings.Contains(string(out), "\"7\":") &&
|
||||
!strings.Contains(string(out), "\"7\": \"\"")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Step 3: Test PCR enforcement by changing enforced PCR (should fail via CLI)
|
||||
By("Testing PCR enforcement by modifying enforced PCR 0")
|
||||
updateSealedVolumeAttestation(tpmHash, "pcrValues.pcrs.0", "wrong-pcr0-value")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should fail to retrieve passphrase with wrong PCR value
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Step 4: Test PCR re-enrollment by setting to empty
|
||||
By("Testing PCR re-enrollment by setting PCR 0 to empty")
|
||||
updateSealedVolumeAttestation(tpmHash, "pcrValues.pcrs.0", "")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should now re-enroll and work via CLI
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
|
||||
// Step 5: Verify PCR 0 was re-enrolled with new value
|
||||
By("Verifying PCR 0 was re-enrolled with current value")
|
||||
Eventually(func() bool {
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", tpmHash, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// PCR 0 should now have a new non-empty value
|
||||
return strings.Contains(string(out), "\"0\":") &&
|
||||
!strings.Contains(string(out), "\"0\": \"\"") &&
|
||||
!strings.Contains(string(out), "\"0\": \"wrong-pcr0-value\"")
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing AK Management", Label("remote-ak-mgmt"), func() {
|
||||
It("should handle AK re-enrollment and enforcement using the same VM", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Initial enrollment with AK re-enrollment mode
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Create SealedVolume with empty AK (re-enrollment mode)
|
||||
attestationConfig := map[string]interface{}{
|
||||
"akPublicKey": "", // Will be re-enrolled
|
||||
"ekPublicKey": "", // Will be re-enrolled
|
||||
}
|
||||
createSealedVolumeWithAttestation(tpmHash, attestationConfig)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Verify AK and EK were re-enrolled
|
||||
By("Verifying AK and EK were re-enrolled with actual values")
|
||||
var learnedAK, learnedEK string
|
||||
Eventually(func() bool {
|
||||
cmd := exec.Command("kubectl", "get", "sealedvolume", tpmHash, "-o", "yaml")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract learned AK and EK for later enforcement test
|
||||
lines := strings.Split(string(out), "\n")
|
||||
for _, line := range lines {
|
||||
if strings.Contains(line, "akPublicKey:") && !strings.Contains(line, "akPublicKey: \"\"") {
|
||||
parts := strings.Split(line, "akPublicKey:")
|
||||
if len(parts) > 1 {
|
||||
learnedAK = strings.TrimSpace(strings.Trim(parts[1], "\""))
|
||||
}
|
||||
}
|
||||
if strings.Contains(line, "ekPublicKey:") && !strings.Contains(line, "ekPublicKey: \"\"") {
|
||||
parts := strings.Split(line, "ekPublicKey:")
|
||||
if len(parts) > 1 {
|
||||
learnedEK = strings.TrimSpace(strings.Trim(parts[1], "\""))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return learnedAK != "" && learnedEK != ""
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Step 3: Test AK enforcement by setting wrong AK
|
||||
By("Testing AK enforcement by setting wrong AK value")
|
||||
updateSealedVolumeAttestation(tpmHash, "akPublicKey", "wrong-ak-value")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should fail to retrieve passphrase with wrong AK
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Step 4: Restore correct AK and verify it works via CLI
|
||||
By("Restoring correct AK and verifying authentication works")
|
||||
updateSealedVolumeAttestation(tpmHash, "akPublicKey", learnedAK)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Should now work with correct AK
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Secret Reuse Scenarios", Label("remote-secret-reuse"), func() {
|
||||
It("should reuse existing secrets when SealedVolume is recreated", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Initial enrollment to create secret
|
||||
tpmHash = getTPMHash(testVM)
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
createSealedVolumeWithAttestation(tpmHash, nil)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 2: Get the generated secret
|
||||
secretName := fmt.Sprintf("%s-cos-persistent", tpmHash)
|
||||
Eventually(func() bool {
|
||||
return secretExists(secretName)
|
||||
}, 30*time.Second, 5*time.Second).Should(BeTrue())
|
||||
|
||||
// Get secret data for comparison
|
||||
cmd := exec.Command("kubectl", "get", "secret", secretName, "-o", "yaml")
|
||||
originalSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Step 3: Delete SealedVolume but keep secret
|
||||
deleteSealedVolume(tpmHash)
|
||||
|
||||
// Verify secret still exists
|
||||
Expect(secretExists(secretName)).To(BeTrue())
|
||||
|
||||
// Step 4: Recreate SealedVolume and verify secret reuse
|
||||
By("Recreating SealedVolume and verifying secret reuse")
|
||||
createSealedVolumeWithAttestation(tpmHash, nil)
|
||||
|
||||
// Should reuse existing secret
|
||||
rebootAndConnect(testVM)
|
||||
verifyEncryptedPartition(testVM)
|
||||
|
||||
// Step 5: Verify the same secret is being used
|
||||
cmd = exec.Command("kubectl", "get", "secret", secretName, "-o", "yaml")
|
||||
newSecretData, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The secret data should be identical (reused, not regenerated)
|
||||
Expect(string(newSecretData)).To(Equal(string(originalSecretData)))
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
|
||||
When("Testing Error Handling and Edge Cases", Label("remote-edge-cases"), func() {
|
||||
It("should handle various error conditions properly", func() {
|
||||
ensureVMRunning()
|
||||
|
||||
// Step 1: Test invalid TPM hash rejection
|
||||
By("Testing invalid TPM hash rejection")
|
||||
invalidHash := "invalid-tpm-hash-12345"
|
||||
createSealedVolumeWithAttestation(invalidHash, nil)
|
||||
|
||||
config = fmt.Sprintf(`#cloud-config
|
||||
|
||||
hostname: metal-{{ trunc 4 .MachineID }}
|
||||
users:
|
||||
- name: kairos
|
||||
passwd: kairos
|
||||
|
||||
install:
|
||||
encrypted_partitions:
|
||||
- COS_PERSISTENT
|
||||
grub_options:
|
||||
extra_cmdline: "rd.neednet=1"
|
||||
reboot: false
|
||||
|
||||
kcrypt:
|
||||
challenger:
|
||||
challenger_server: "http://%s"
|
||||
`, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
installKairosWithConfig(config)
|
||||
|
||||
// Should fail due to TPM hash mismatch (test via CLI, no risky reboot)
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", false)
|
||||
|
||||
// Cleanup invalid SealedVolume
|
||||
deleteSealedVolume(invalidHash)
|
||||
|
||||
// Step 2: Test with correct TPM hash to verify system works
|
||||
tpmHash = getTPMHash(testVM)
|
||||
createSealedVolumeWithAttestation(tpmHash, nil)
|
||||
|
||||
// Test with correct hash should work
|
||||
expectPassphraseRetrieval(testVM, "COS_PERSISTENT", true)
|
||||
|
||||
cleanupTestResources(tpmHash)
|
||||
})
|
||||
})
|
||||
})
|
@@ -8,6 +8,8 @@ import (
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -232,3 +234,300 @@ func getFreePort() (port int, err error) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// Common Test Helper Functions
|
||||
// ========================================
|
||||
|
||||
// Helper to install Kairos with given config
|
||||
func installKairosWithConfig(vm VM, config string) {
|
||||
configFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(configFile.Name())
|
||||
|
||||
err = os.WriteFile(configFile.Name(), []byte(config), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = vm.Scp(configFile.Name(), "config.yaml", "0744")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
By("Installing Kairos with config")
|
||||
installationOutput, err := vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
Expect(err).ToNot(HaveOccurred(), installationOutput)
|
||||
}
|
||||
|
||||
// Helper to reboot and wait for connection
|
||||
func rebootAndConnect(vm VM) {
|
||||
By("Rebooting VM")
|
||||
vm.Reboot()
|
||||
By("Waiting for VM to be connectable")
|
||||
vm.EventuallyConnects(1200)
|
||||
}
|
||||
|
||||
// Helper to verify encrypted partition exists
|
||||
func verifyEncryptedPartition(vm VM) {
|
||||
By("Verifying encrypted partition exists")
|
||||
out, err := vm.Sudo("blkid")
|
||||
Expect(err).ToNot(HaveOccurred(), out)
|
||||
Expect(out).To(MatchRegexp("TYPE=\"crypto_LUKS\" PARTLABEL=\"persistent\""), out)
|
||||
Expect(out).To(MatchRegexp("/dev/mapper.*LABEL=\"COS_PERSISTENT\""), out)
|
||||
}
|
||||
|
||||
// Helper to get TPM hash from VM
|
||||
func getTPMHash(vm VM) string {
|
||||
By("Getting TPM hash from VM")
|
||||
hash, err := vm.Sudo("/system/discovery/kcrypt-discovery-challenger")
|
||||
Expect(err).ToNot(HaveOccurred(), hash)
|
||||
return strings.TrimSpace(hash)
|
||||
}
|
||||
|
||||
// Helper to test passphrase retrieval via CLI (returns true if successful, false if failed)
|
||||
func checkPassphraseRetrieval(vm VM, partitionLabel string) bool {
|
||||
By(fmt.Sprintf("Testing passphrase retrieval for partition %s via CLI", partitionLabel))
|
||||
|
||||
// Configure the CLI to use the challenger server
|
||||
cliCmd := fmt.Sprintf(`/system/discovery/kcrypt-discovery-challenger get \
|
||||
--partition-label=%s \
|
||||
--challenger-server="http://%s" \
|
||||
2>/dev/null`, partitionLabel, os.Getenv("KMS_ADDRESS"))
|
||||
|
||||
out, err := vm.Sudo(cliCmd)
|
||||
if err != nil {
|
||||
By(fmt.Sprintf("Passphrase retrieval failed: %v", err))
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if we got a passphrase (non-empty output)
|
||||
passphrase := strings.TrimSpace(out)
|
||||
success := len(passphrase) > 0
|
||||
|
||||
if success {
|
||||
By("Passphrase retrieval successful")
|
||||
} else {
|
||||
By("Passphrase retrieval failed - empty response")
|
||||
}
|
||||
|
||||
return success
|
||||
}
|
||||
|
||||
// Helper to test passphrase retrieval with expectation (for cleaner test logic)
|
||||
func expectPassphraseRetrieval(vm VM, partitionLabel string, shouldSucceed bool) {
|
||||
success := checkPassphraseRetrieval(vm, partitionLabel)
|
||||
if shouldSucceed {
|
||||
Expect(success).To(BeTrue(), "Passphrase retrieval should have succeeded")
|
||||
} else {
|
||||
Expect(success).To(BeFalse(), "Passphrase retrieval should have failed")
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume with specific attestation configuration
|
||||
func createSealedVolumeWithAttestation(tpmHash string, attestationConfig map[string]interface{}) {
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, tpmHash, tpmHash)
|
||||
|
||||
if attestationConfig != nil {
|
||||
sealedVolumeYaml += "\n attestation:"
|
||||
for key, value := range attestationConfig {
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
sealedVolumeYaml += fmt.Sprintf("\n %s: \"%s\"", key, v)
|
||||
case map[string]string:
|
||||
sealedVolumeYaml += fmt.Sprintf("\n %s:", key)
|
||||
for k, val := range v {
|
||||
sealedVolumeYaml += "\n pcrs:"
|
||||
sealedVolumeYaml += fmt.Sprintf("\n \"%s\": \"%s\"", k, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating SealedVolume with attestation config: %+v", attestationConfig))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to update SealedVolume attestation configuration
|
||||
func updateSealedVolumeAttestation(tpmHashParam string, field, value string) {
|
||||
By(fmt.Sprintf("Updating SealedVolume %s field %s to %s", tpmHashParam, field, value))
|
||||
patch := fmt.Sprintf(`{"spec":{"attestation":{"%s":"%s"}}}`, field, value)
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", tpmHashParam, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to quarantine TPM
|
||||
func quarantineTPM(tpmHash string) {
|
||||
By(fmt.Sprintf("Quarantining TPM %s", tpmHash))
|
||||
patch := `{"spec":{"quarantined":true}}`
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", tpmHash, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to unquarantine TPM
|
||||
func unquarantineTPM(tpmHashParam string) {
|
||||
By(fmt.Sprintf("Unquarantining TPM %s", tpmHashParam))
|
||||
patch := `{"spec":{"quarantined":false}}`
|
||||
cmd := exec.Command("kubectl", "patch", "sealedvolume", tpmHashParam, "--type=merge", "-p", patch)
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to delete SealedVolume
|
||||
func deleteSealedVolume(tmpHashParam string) {
|
||||
By(fmt.Sprintf("Deleting SealedVolume %s", tmpHashParam))
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", tmpHashParam, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to delete SealedVolume from all namespaces
|
||||
func deleteSealedVolumeAllNamespaces(tpmHashParam string) {
|
||||
By(fmt.Sprintf("Deleting SealedVolume %s from all namespaces", tpmHashParam))
|
||||
cmd := exec.Command("kubectl", "delete", "sealedvolume", tpmHashParam, "--ignore-not-found=true", "--all-namespaces")
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to check if secret exists
|
||||
func secretExists(secretName string) bool {
|
||||
cmd := exec.Command("kubectl", "get", "secret", secretName, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
return err == nil && len(out) > 0 && !strings.Contains(string(out), "NotFound")
|
||||
}
|
||||
|
||||
// Helper to check if secret exists in namespace
|
||||
func secretExistsInNamespace(secretName, namespace string) bool {
|
||||
cmd := exec.Command("kubectl", "get", "secret", secretName, "-n", namespace, "--ignore-not-found=true")
|
||||
out, err := cmd.CombinedOutput()
|
||||
return err == nil && len(out) > 0 && !strings.Contains(string(out), "NotFound")
|
||||
}
|
||||
|
||||
// Helper to apply YAML to Kubernetes
|
||||
func kubectlApplyYaml(yamlData string) {
|
||||
yamlFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(yamlFile.Name())
|
||||
|
||||
err = os.WriteFile(yamlFile.Name(), []byte(yamlData), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cmd := exec.Command("kubectl", "apply", "-f", yamlFile.Name())
|
||||
out, err := cmd.CombinedOutput()
|
||||
Expect(err).ToNot(HaveOccurred(), string(out))
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume with multi-partition configuration
|
||||
func createMultiPartitionSealedVolume(tpmHash string, partitions []string) {
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: default
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:`, tpmHash, tpmHash)
|
||||
|
||||
for _, partition := range partitions {
|
||||
sealedVolumeYaml += fmt.Sprintf(`
|
||||
- label: %s`, partition)
|
||||
}
|
||||
|
||||
sealedVolumeYaml += "\n quarantined: false"
|
||||
|
||||
By(fmt.Sprintf("Creating multi-partition SealedVolume for partitions: %v", partitions))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to create SealedVolume in specific namespace
|
||||
func createSealedVolumeInNamespace(tpmHash, namespace string) {
|
||||
// First create the namespace if it doesn't exist
|
||||
kubectlApplyYaml(fmt.Sprintf(`---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: %s`, namespace))
|
||||
|
||||
sealedVolumeYaml := fmt.Sprintf(`---
|
||||
apiVersion: keyserver.kairos.io/v1alpha1
|
||||
kind: SealedVolume
|
||||
metadata:
|
||||
name: "%s"
|
||||
namespace: %s
|
||||
spec:
|
||||
TPMHash: "%s"
|
||||
partitions:
|
||||
- label: COS_PERSISTENT
|
||||
quarantined: false`, tpmHash, namespace, tpmHash)
|
||||
|
||||
By(fmt.Sprintf("Creating SealedVolume in namespace %s", namespace))
|
||||
kubectlApplyYaml(sealedVolumeYaml)
|
||||
}
|
||||
|
||||
// Helper to cleanup test resources
|
||||
func cleanupTestResources(tpmHash string) {
|
||||
if tpmHash != "" {
|
||||
deleteSealedVolumeAllNamespaces(tpmHash)
|
||||
|
||||
// Cleanup associated secrets in all namespaces
|
||||
cmd := exec.Command("kubectl", "delete", "secret", tpmHash, "--ignore-not-found=true", "--all-namespaces")
|
||||
cmd.CombinedOutput()
|
||||
|
||||
cmd = exec.Command("kubectl", "delete", "secret", fmt.Sprintf("%s-cos-persistent", tpmHash), "--ignore-not-found=true", "--all-namespaces")
|
||||
cmd.CombinedOutput()
|
||||
|
||||
// Cleanup test namespaces
|
||||
cmd = exec.Command("kubectl", "delete", "namespace", "test-ns-1", "--ignore-not-found=true")
|
||||
cmd.CombinedOutput()
|
||||
|
||||
cmd = exec.Command("kubectl", "delete", "namespace", "test-ns-2", "--ignore-not-found=true")
|
||||
cmd.CombinedOutput()
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to install Kairos with config (handles both success and failure cases)
|
||||
func installKairosWithConfigAdvanced(vm VM, config string, expectSuccess bool) {
|
||||
configFile, err := os.CreateTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.Remove(configFile.Name())
|
||||
|
||||
err = os.WriteFile(configFile.Name(), []byte(config), 0744)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = vm.Scp(configFile.Name(), "config.yaml", "0744")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
if expectSuccess {
|
||||
By("Installing Kairos with config")
|
||||
installationOutput, err := vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
Expect(err).ToNot(HaveOccurred(), installationOutput)
|
||||
} else {
|
||||
By("Installing Kairos with config (expecting failure)")
|
||||
vm.Sudo("/bin/bash -c 'set -o pipefail && kairos-agent manual-install --device auto config.yaml 2>&1 | tee manual-install.txt'")
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to cleanup VM and TPM emulator
|
||||
func cleanupVM(vm VM) {
|
||||
By("Cleaning up test VM")
|
||||
err := vm.Destroy(func(vm VM) {
|
||||
// Stop TPM emulator
|
||||
tpmPID, err := os.ReadFile(path.Join(vm.StateDir, "tpm", "pid"))
|
||||
if err == nil && len(tpmPID) != 0 {
|
||||
pid, err := strconv.Atoi(string(tpmPID))
|
||||
if err == nil {
|
||||
syscall.Kill(pid, syscall.SIGKILL)
|
||||
}
|
||||
}
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
Reference in New Issue
Block a user