Initial import

Ettore Di Giacinto 2022-10-09 22:32:56 +00:00
parent dd50775dfa
commit 05d48347d7
55 changed files with 3849 additions and 0 deletions

4
.dockerignore Normal file

@@ -0,0 +1,4 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/

63
.github/workflows/image.yml vendored Normal file

@@ -0,0 +1,63 @@
---
name: 'build container images'
on:
push:
branches:
- main
tags:
- '*'
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=quay.io/kairos/kcrypt-challenger
VERSION=latest
SHORTREF=${GITHUB_SHA::8}
# If this is a git tag, use the tag name as a docker tag
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
fi
TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
# If the VERSION looks like a version number, assume that
# this is the most recent version of the image and also
# tag it 'latest'.
if [[ $VERSION =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:latest"
fi
# Set output parameters.
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@main
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@main
- name: Login to Quay
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_PASSWORD }}
- name: Build
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

27
.gitignore vendored Normal file

@@ -0,0 +1,27 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
/helm-chart

28
Dockerfile Normal file

@@ -0,0 +1,28 @@
# Build the manager binary
FROM golang:1.18 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY pkg/ pkg/
COPY controllers/ controllers/
# Build. TARGETOS/TARGETARCH are provided by buildx, so the binary matches the
# platform being built (the workflow builds linux/amd64 and linux/arm64).
ARG TARGETOS
ARG TARGETARCH
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]

260
Makefile Normal file

@@ -0,0 +1,260 @@
# VERSION defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g. make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g. export VERSION=0.0.2)
VERSION ?= 0.0.1
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (e.g. CHANNELS = "candidate,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g. make bundle CHANNELS=candidate,fast,stable)
# - use environment variables to overwrite this value (e.g. export CHANNELS="candidate,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (e.g. DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g. make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g. export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# kairos.io/kcrypt-controller-bundle:$VERSION and kairos.io/kcrypt-controller-catalog:$VERSION.
IMAGE_TAG_BASE ?= quay.io/kairos/kcrypt-controller
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (e.g. make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
# You can enable this value if you would like to use SHA-based digests.
# To enable, set the flag to true.
USE_IMAGE_DIGESTS ?= false
ifeq ($(USE_IMAGE_DIGESTS), true)
BUNDLE_GEN_FLAGS += --use-image-digests
endif
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by the envtest binary.
ENVTEST_K8S_VERSION = 1.24.2
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
.PHONY: all
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# form 'xyz: ## something', and then pretty-formats the target and its help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
.PHONY: fmt
fmt: ## Run go fmt against code.
go fmt ./...
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
##@ Build
.PHONY: build
build: generate fmt vet ## Build manager binary.
go build -o bin/manager main.go
.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
docker build -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
docker push ${IMG}
##@ Deployment
ifndef ignore-not-found
ignore-not-found = true
endif
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
##@ Build Dependencies
## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
mkdir -p $(LOCALBIN)
## Tool Binaries
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
## Tool Versions
KUSTOMIZE_VERSION ?= v3.8.7
CONTROLLER_TOOLS_VERSION ?= v0.9.2
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
test -s $(LOCALBIN)/kustomize || { curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }
.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
operator-sdk generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS)
operator-sdk bundle validate ./bundle
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
$(MAKE) docker-push IMG=$(BUNDLE_IMG)
.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
}
else
OPM = $(shell which opm)
endif
endif
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)
CLUSTER_NAME?="kairos-challenger-e2e"
kind-setup:
kind create cluster --name ${CLUSTER_NAME} || true
$(MAKE) kind-setup-image
kind-setup-image: docker-build
kind load docker-image --name $(CLUSTER_NAME) $(IMG)
kind-prepare-tests: kind-setup install undeploy-dev deploy-dev
.PHONY: deploy-dev
deploy-dev: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/dev | kubectl apply -f -
.PHONY: undeploy-dev
undeploy-dev: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/dev | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
kubesplit: manifests kustomize
rm -rf helm-chart
mkdir helm-chart
$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart

19
PROJECT Normal file

@@ -0,0 +1,19 @@
domain: kairos.io
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: kcrypt-controller
repo: github.com/kairos-io/kairos-challenger
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: kairos.io
group: keyserver
kind: SealedVolume
path: github.com/kairos-io/kairos-challenger/api/v1alpha1
version: v1alpha1
version: "3"

30
README.md Normal file

@@ -0,0 +1,30 @@
# kcrypt-challenger
| :exclamation: | This is experimental! |
|-|:-|
This is the Kairos kcrypt-challenger Kubernetes Native Extension.
To install, use helm:
```
# Adds the kairos repo to helm
$ helm repo add kairos https://kairos-io.github.io/helm-charts
"kairos" has been added to your repositories
$ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kairos" chart repository
Update Complete. ⎈Happy Helming!⎈
# Install the CRD chart
$ helm install kairos-crd kairos/kairos-crds
NAME: kairos-crd
LAST DEPLOYED: Tue Sep 6 20:35:34 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
# Install the challenger chart
$ helm install kairos-challenger kairos/challenger
```


@@ -0,0 +1,36 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the keyserver v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=keyserver.kairos.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "keyserver.kairos.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -0,0 +1,68 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// SealedVolumeSpec defines the desired state of SealedVolume
type SealedVolumeSpec struct {
TPMHash string `json:"TPMHash,omitempty"`
Label string `json:"label,omitempty"`
Passphrase *SecretSpec `json:"passphraseRef,omitempty"`
Quarantined bool `json:"quarantined,omitempty"`
}
type SecretSpec struct {
Name string `json:"name,omitempty"`
Path string `json:"path,omitempty"`
}
// SealedVolumeStatus defines the observed state of SealedVolume
type SealedVolumeStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// SealedVolume is the Schema for the sealedvolumes API
type SealedVolume struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SealedVolumeSpec `json:"spec,omitempty"`
Status SealedVolumeStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// SealedVolumeList contains a list of SealedVolume
type SealedVolumeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SealedVolume `json:"items"`
}
func init() {
SchemeBuilder.Register(&SealedVolume{}, &SealedVolumeList{})
}
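
The json tags on the spec types above are what produce the field names seen later in the generated CRD schema (`TPMHash`, `label`, `passphraseRef`, `quarantined`). A small standalone sketch (types copied from above, values illustrative) to confirm the wire shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the API types above, only to show how the json tags
// shape the serialized object; they match the generated CRD schema.
type SecretSpec struct {
	Name string `json:"name,omitempty"`
	Path string `json:"path,omitempty"`
}

type SealedVolumeSpec struct {
	TPMHash     string      `json:"TPMHash,omitempty"`
	Label       string      `json:"label,omitempty"`
	Passphrase  *SecretSpec `json:"passphraseRef,omitempty"`
	Quarantined bool        `json:"quarantined,omitempty"`
}

func main() {
	spec := SealedVolumeSpec{
		TPMHash:    "something",
		Label:      "COS_PERSISTENT",
		Passphrase: &SecretSpec{Name: "mysecret", Path: "pass"},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
	// Output:
	// {
	//   "TPMHash": "something",
	//   "label": "COS_PERSISTENT",
	//   "passphraseRef": {
	//     "name": "mysecret",
	//     "path": "pass"
	//   }
	// }
	// ("quarantined" is omitted: false is the zero value and the tag
	// carries omitempty.)
}
```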


@@ -0,0 +1,135 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SealedVolume) DeepCopyInto(out *SealedVolume) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolume.
func (in *SealedVolume) DeepCopy() *SealedVolume {
if in == nil {
return nil
}
out := new(SealedVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SealedVolume) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SealedVolumeList) DeepCopyInto(out *SealedVolumeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]SealedVolume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolumeList.
func (in *SealedVolumeList) DeepCopy() *SealedVolumeList {
if in == nil {
return nil
}
out := new(SealedVolumeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SealedVolumeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SealedVolumeSpec) DeepCopyInto(out *SealedVolumeSpec) {
*out = *in
if in.Passphrase != nil {
in, out := &in.Passphrase, &out.Passphrase
*out = new(SecretSpec)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolumeSpec.
func (in *SealedVolumeSpec) DeepCopy() *SealedVolumeSpec {
if in == nil {
return nil
}
out := new(SealedVolumeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SealedVolumeStatus) DeepCopyInto(out *SealedVolumeStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SealedVolumeStatus.
func (in *SealedVolumeStatus) DeepCopy() *SealedVolumeStatus {
if in == nil {
return nil
}
out := new(SealedVolumeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretSpec) DeepCopyInto(out *SecretSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec.
func (in *SecretSpec) DeepCopy() *SecretSpec {
if in == nil {
return nil
}
out := new(SecretSpec)
in.DeepCopyInto(out)
return out
}

89
cmd/discovery/main.go Normal file

@@ -0,0 +1,89 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/jaypipes/ghw/pkg/block"
"github.com/kairos-io/go-tpm"
"github.com/kairos-io/kairos/pkg/machine"
"github.com/kairos-io/kcrypt/pkg/bus"
"gopkg.in/yaml.v3"
"github.com/mudler/go-pluggable"
)
func main() {
if len(os.Args) >= 2 && bus.IsEventDefined(os.Args[1]) {
checkErr(start())
}
pubhash, _ := tpm.GetPubHash()
fmt.Print(pubhash)
}
func checkErr(err error) {
if err != nil {
fmt.Println(err)
os.Exit(1)
}
os.Exit(0)
}
// echo '{ "data": "{ \\"label\\": \\"LABEL\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
func start() error {
factory := pluggable.NewPluginFactory()
connectionDetails := &struct {
Server string
}{}
var server string
d, err := machine.DotToYAML("/proc/cmdline")
if err == nil { // best-effort
yaml.Unmarshal(d, connectionDetails) //nolint:errcheck
}
server = connectionDetails.Server
if os.Getenv("WSS_SERVER") != "" {
server = os.Getenv("WSS_SERVER")
}
// Input: bus.EventInstallPayload
// Expected output: map[string]string{}
factory.Add(bus.EventDiscoveryPassword, func(e *pluggable.Event) pluggable.EventResponse {
b := &block.Partition{}
err := json.Unmarshal([]byte(e.Data), b)
if err != nil {
return pluggable.EventResponse{
Error: fmt.Sprintf("failed reading partitions: %s", err.Error()),
}
}
msg, err := tpm.Get(server, tpm.WithAdditionalHeader("label", b.Label))
if err != nil {
return pluggable.EventResponse{
Error: fmt.Sprintf("failed contacting from wss server: %s", err.Error()),
}
}
result := map[string]interface{}{}
err = json.Unmarshal(msg, &result)
if err != nil {
return pluggable.EventResponse{
Error: fmt.Sprintf("failed reading from wss server: %s", err.Error()),
}
}
p, ok := result["passphrase"]
if !ok {
return pluggable.EventResponse{
Error: "not found",
}
}
return pluggable.EventResponse{
Data: fmt.Sprint(p),
}
})
return factory.Run(pluggable.EventType(os.Args[1]), os.Stdin, os.Stdout)
}
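
To make the contract of the event handler above concrete: the event payload is a serialized ghw partition (only its label is used as a lookup key), and the challenger server's reply must carry a top-level `passphrase` key. A minimal self-contained sketch of that round-trip (values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// What kcrypt puts in the event's Data field: a serialized partition.
	// The plugin above only reads its "label" field.
	payload := []byte(`{"label":"COS_PERSISTENT"}`)
	partition := struct {
		Label string `json:"label"`
	}{}
	if err := json.Unmarshal(payload, &partition); err != nil {
		panic(err)
	}

	// What the server is expected to answer: the plugin looks up the
	// "passphrase" key and fails with "not found" if it is absent.
	reply := []byte(`{"passphrase":"awesome-passphrase"}`)
	result := map[string]interface{}{}
	if err := json.Unmarshal(reply, &result); err != nil {
		panic(err)
	}
	fmt.Printf("label=%s passphrase=%v\n", partition.Label, result["passphrase"])
}
```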


@@ -0,0 +1,59 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
name: sealedvolumes.keyserver.kairos.io
spec:
group: keyserver.kairos.io
names:
kind: SealedVolume
listKind: SealedVolumeList
plural: sealedvolumes
singular: sealedvolume
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: SealedVolume is the Schema for the sealedvolumes API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: SealedVolumeSpec defines the desired state of SealedVolume
properties:
TPMHash:
type: string
label:
type: string
passphraseRef:
properties:
name:
type: string
path:
type: string
type: object
quarantined:
type: boolean
type: object
status:
description: SealedVolumeStatus defines the observed state of SealedVolume
type: object
type: object
served: true
storage: true
subresources:
status: {}


@@ -0,0 +1,21 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/keyserver.kairos.io_sealedvolumes.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_sealedvolumes.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_sealedvolumes.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml


@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations


@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: sealedvolumes.keyserver.kairos.io


@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: sealedvolumes.keyserver.kairos.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1


@@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: kcrypt-controller-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: kcrypt-controller-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service


@@ -0,0 +1,39 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"


@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config


@@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: kcrypt-controller-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: kcrypt-controller-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
- pull.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service


@@ -0,0 +1,39 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"


@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
imagePullPolicy: IfNotPresent
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config

12
config/dev/pull.yaml Normal file

@@ -0,0 +1,12 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
imagePullPolicy: IfNotPresent


@@ -0,0 +1,21 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: 8fc84668.kairos.io
# leaderElectionReleaseOnCancel defines if the leader should step down voluntarily
# when the Manager ends. This requires the binary to immediately end when the
# Manager is stopped; otherwise, this setting is unsafe. Setting this significantly
# speeds up voluntary leader transitions as the new leader doesn't have to wait
# LeaseDuration time first.
# In the default scaffold provided, the program ends immediately after
# the manager stops, so it would be fine to enable this option. However,
# if you are doing or intend to do any operation such as performing cleanups
# after the manager stops, then its usage might be unsafe.
# leaderElectionReleaseOnCancel: true


@@ -0,0 +1,16 @@
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- files:
- controller_manager_config.yaml
name: manager-config
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: controller
newTag: latest


@@ -0,0 +1,85 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
spec:
selector:
matchLabels:
control-plane: controller-manager
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
spec:
securityContext:
runAsNonRoot: true
# TODO(user): For common cases that do not require escalating privileges
# it is recommended to ensure that all your Pods/Containers are restrictive.
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
# versions < 1.19 or on vendor versions which do NOT support this field by default (e.g. OpenShift < 4.11).
# seccompProfile:
# type: RuntimeDefault
containers:
- command:
- /manager
args:
- --leader-elect
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: kcrypt-escrow-server
namespace: system
spec:
type: NodePort
ports:
- name: wss
port: 8082
protocol: TCP
targetPort: wss


@@ -0,0 +1,27 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/kcrypt-controller.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard
# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
# group: apps
# version: v1
# kind: Deployment
# name: controller-manager
# namespace: system
# patch: |-
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/containers/1/volumeMounts/0
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/volumes/0


@@ -0,0 +1,2 @@
resources:
- monitor.yaml


@@ -0,0 +1,20 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager


@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get


@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
control-plane: controller-manager


@@ -0,0 +1,18 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml


@@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

42
config/rbac/role.yaml Normal file

@@ -0,0 +1,42 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- get
- list
- watch
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes/finalizers
verbs:
- update
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes/status
verbs:
- get
- patch
- update


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system


@@ -0,0 +1,24 @@
# permissions for end users to edit sealedvolumes.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sealedvolume-editor-role
rules:
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes/status
verbs:
- get


@@ -0,0 +1,20 @@
# permissions for end users to view sealedvolumes.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sealedvolume-viewer-role
rules:
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes
verbs:
- get
- list
- watch
- apiGroups:
- keyserver.kairos.io
resources:
- sealedvolumes/status
verbs:
- get


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: controller-manager
namespace: system


@@ -0,0 +1,6 @@
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: sealedvolume-sample
spec:
# TODO(user): Add fields here


@@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- keyserver_v1alpha1_sealedvolume.yaml
#+kubebuilder:scaffold:manifestskustomizesamples


@@ -0,0 +1,7 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: true
tests: []


@@ -0,0 +1,16 @@
resources:
- bases/config.yaml
patchesJson6902:
- path: patches/basic.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
- path: patches/olm.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
#+kubebuilder:scaffold:patchesJson6902


@@ -0,0 +1,10 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: basic
test: basic-check-spec-test


@@ -0,0 +1,50 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-bundle-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-crds-have-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-crds-have-resources-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-spec-descriptors-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.23.0
labels:
suite: olm
test: olm-status-descriptors-test


@@ -0,0 +1,63 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
)
// SealedVolumeReconciler reconciles a SealedVolume object
type SealedVolumeReconciler struct {
client.Client
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=keyserver.kairos.io,resources=sealedvolumes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=keyserver.kairos.io,resources=sealedvolumes/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=keyserver.kairos.io,resources=sealedvolumes/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=secrets,verbs=create;get;list;watch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the SealedVolume object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile
func (r *SealedVolumeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = log.FromContext(ctx)
// TODO(user): your logic here
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *SealedVolumeReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&keyserverv1alpha1.SealedVolume{}).
Complete(r)
}
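
The scaffolded Reconcile above is a no-op; this commit leaves the real logic as a TODO. Purely as a hedged illustration (not the project's actual method), a minimal body that fetches the watched SealedVolume could look like this, reusing the controller-runtime client embedded in the reconciler:

```go
package controllers

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
)

// reconcileSketch is illustrative only: it fetches the SealedVolume named
// by the request and logs the fields a key server would care about.
func (r *SealedVolumeReconciler) reconcileSketch(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	volume := &keyserverv1alpha1.SealedVolume{}
	if err := r.Get(ctx, req.NamespacedName, volume); err != nil {
		// The object may already be gone; don't requeue in that case.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	logger.Info("observed SealedVolume",
		"tpmHash", volume.Spec.TPMHash,
		"label", volume.Spec.Label,
		"quarantined", volume.Spec.Quarantined)
	return ctrl.Result{}, nil
}
```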

83
controllers/suite_test.go Normal file

@@ -0,0 +1,83 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = keyserverv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
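
The suite only boots envtest for now; no specs are registered. A hedged sketch of a spec that could sit alongside it, round-tripping a SealedVolume through the envtest API server (names and values are illustrative; k8sClient is the client created in BeforeSuite above):

```go
package controllers

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
)

// Illustrative spec: creates a SealedVolume and reads it back.
var _ = Describe("SealedVolume", func() {
	It("can be created and read back", func() {
		ctx := context.Background()
		volume := &keyserverv1alpha1.SealedVolume{
			ObjectMeta: metav1.ObjectMeta{Name: "test-volume", Namespace: "default"},
			Spec: keyserverv1alpha1.SealedVolumeSpec{
				TPMHash: "deadbeef",
				Label:   "COS_PERSISTENT",
			},
		}
		Expect(k8sClient.Create(ctx, volume)).To(Succeed())

		fetched := &keyserverv1alpha1.SealedVolume{}
		Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(volume), fetched)).To(Succeed())
		Expect(fetched.Spec.Label).To(Equal("COS_PERSISTENT"))
	})
})
```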


@@ -0,0 +1,22 @@
# echo '{ "data": "{ \\"label\\": \\"COS_PERSISTENT\\" }"}' | sudo -E WSS_SERVER="http://localhost:8082/challenge" ./challenger "discovery.password"
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: default
type: Opaque
stringData:
pass: "awesome-passphrase"
---
apiVersion: keyserver.kairos.io/v1alpha1
kind: SealedVolume
metadata:
name: test
namespace: default
spec:
TPMHash: "something"
label: "label"
passphraseRef:
name: mysecret
path: pass
quarantined: false
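
The sample above ties a TPM hash and partition label to a key inside a plain Secret. As a hedged sketch of how a server-side lookup over these objects could work (this is not necessarily what pkg/challenger does), using the controller-runtime client:

```go
package sketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
)

// findPassphrase is illustrative: it scans SealedVolumes in a namespace for
// one matching the requesting machine's TPM hash and partition label, then
// reads the referenced key out of the passphrase Secret.
func findPassphrase(ctx context.Context, c client.Client, namespace, tpmHash, label string) (string, error) {
	volumes := &keyserverv1alpha1.SealedVolumeList{}
	if err := c.List(ctx, volumes, client.InNamespace(namespace)); err != nil {
		return "", err
	}
	for _, v := range volumes.Items {
		if v.Spec.TPMHash != tpmHash || v.Spec.Label != label || v.Spec.Quarantined {
			continue
		}
		if v.Spec.Passphrase == nil {
			continue
		}
		secret := &corev1.Secret{}
		key := client.ObjectKey{Namespace: namespace, Name: v.Spec.Passphrase.Name}
		if err := c.Get(ctx, key, secret); err != nil {
			return "", err
		}
		// With the sample above: Name=mysecret, Path=pass.
		return string(secret.Data[v.Spec.Passphrase.Path]), nil
	}
	return "", fmt.Errorf("no sealed volume for TPM hash %q and label %q", tpmHash, label)
}
```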

119
go.mod Normal file

@@ -0,0 +1,119 @@
module github.com/kairos-io/kairos-challenger
go 1.18
require (
github.com/gorilla/websocket v1.5.0
github.com/jaypipes/ghw v0.9.0
github.com/kairos-io/go-tpm v0.0.0-20221007215323-700d855876c5
github.com/kairos-io/kairos v1.1.2
github.com/kairos-io/kcrypt v0.0.0-20221006145351-cabc24dc37a7
github.com/mudler/go-pluggable v0.0.0-20220716112424-189d463e3ff3
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.20.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/apimachinery v0.24.2
k8s.io/client-go v0.24.2
sigs.k8s.io/controller-runtime v0.12.2
)
require (
cloud.google.com/go v0.93.3 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/atomicgo/cursor v0.0.1 // indirect
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisbrodbeck/machineid v1.0.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/certificate-transparency-go v1.1.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-attestation v0.4.3 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-tpm v0.3.3 // indirect
github.com/google/go-tpm-tools v0.3.2 // indirect
github.com/google/go-tspi v0.2.1-0.20190423175329-115dea689aad // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/gookit/color v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/itchyny/gojq v0.12.8 // indirect
github.com/itchyny/timefmt-go v0.1.3 // indirect
github.com/joho/godotenv v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/pterm/pterm v0.12.41 // indirect
github.com/qeesung/image2ascii v1.0.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/wayneashleyberry/terminal-dimensions v1.1.0 // indirect
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/net v0.0.0-20220630215102-69896b714898 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
howett.net/plist v1.0.0 // indirect
k8s.io/api v0.24.2 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/component-base v0.24.2 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

1621
go.sum Normal file

File diff suppressed because it is too large

15
hack/boilerplate.go.txt Normal file
View File

@ -0,0 +1,15 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

130
main.go Normal file
View File

@ -0,0 +1,130 @@
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
"github.com/kairos-io/kairos-challenger/controllers"
"github.com/kairos-io/kairos-challenger/pkg/challenger"
//+kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(keyserverv1alpha1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var challengerAddr string
var namespace string
flag.StringVar(&namespace, "namespace", "kcrypt", "The namespace the challenger will read secrets from.")
flag.StringVar(&challengerAddr, "challenger-bind-address", ":8082", "The address the challenger server endpoint binds to.")
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "8fc84668.kairos.io",
// LeaderElectionReleaseOnCancel defines whether the leader should step down voluntarily
// when the Manager ends. This requires the binary to end immediately when the
// Manager is stopped; otherwise this setting is unsafe. Setting it significantly
// speeds up voluntary leader transitions, as the new leader doesn't have to wait
// for the LeaseDuration to elapse first.
//
// In the default scaffold provided, the program ends immediately after
// the manager stops, so it would be fine to enable this option. However,
// if you perform, or intend to perform, any operation such as cleanup
// after the manager stops, then enabling it might be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
reconciler := &controllers.SealedVolumeReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
if err = reconciler.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "SealedVolume")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
setupLog.Error(err, "unable to get clientset")
os.Exit(1)
}
go challenger.Start(context.Background(), clientset, reconciler, namespace, challengerAddr)
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
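
Note: the manager resolves its kubeconfig through controller-runtime's ctrl.GetConfigOrDie, so for a quick local run against the current cluster an illustrative invocation (the flag values are simply the defaults registered above) is:

go run . -namespace kcrypt -challenger-bind-address :8082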

View File

@ -0,0 +1,149 @@
package challenger
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
keyserverv1alpha1 "github.com/kairos-io/kairos-challenger/api/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
tpm "github.com/kairos-io/go-tpm"
"github.com/kairos-io/kairos-challenger/controllers"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
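// writeRead sends input to the client as a single binary websocket message
// and blocks until one response message has been read back in full.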
func writeRead(conn *websocket.Conn, input []byte) ([]byte, error) {
writer, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
return nil, err
}
if _, err := writer.Write(input); err != nil {
return nil, err
}
if err := writer.Close(); err != nil {
return nil, err
}
_, reader, err := conn.NextReader()
if err != nil {
return nil, err
}
return io.ReadAll(reader)
}
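// Start runs the challenge server on the given address until ctx is cancelled.
// It is non-blocking: both the HTTP server and the shutdown watcher run in
// their own goroutines.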
func Start(ctx context.Context, kclient *kubernetes.Clientset, reconciler *controllers.SealedVolumeReconciler, namespace, address string) {
fmt.Println("Challenger started at", address)
s := http.Server{
Addr: address,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
}
m := http.NewServeMux()
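// The /challenge endpoint implements the attestation handshake: the client
// presents TPM attestation data via the Authorization header, the server
// matches the EK public-key hash and label against SealedVolume resources,
// issues a challenge, validates the proof, and finally returns the sealed
// passphrase unless the volume has been quarantined.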
m.HandleFunc("/challenge", func(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
fmt.Println("error upgrading to websocket", err.Error())
return
}
for {
fmt.Println("Received connection")
volumeList := &keyserverv1alpha1.SealedVolumeList{}
if err := reconciler.List(ctx, volumeList, &client.ListOptions{Namespace: namespace}); err != nil {
fmt.Println("Failed listing volumes")
fmt.Println(err)
continue
}
token := r.Header.Get("Authorization")
label := r.Header.Get("label")
ek, at, err := tpm.GetAttestationData(token)
if err != nil {
fmt.Println("Failed getting tpm token")
fmt.Println("error", err.Error())
return
}
hashEncoded, err := tpm.DecodePubHash(ek)
if err != nil {
fmt.Println("error decoding pubhash", err.Error())
return
}
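// Look for a SealedVolume whose TPM hash and label match the attesting machine.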
found := false
var volume keyserverv1alpha1.SealedVolume
for _, v := range volumeList.Items {
if hashEncoded == v.Spec.TPMHash && v.Spec.Label == label {
found = true
volume = v
}
}
if !found {
fmt.Println("No TPM hash found for", hashEncoded)
continue // keep iterating until a matching SealedVolume appears
}
secret, challenge, err := tpm.GenerateChallenge(ek, at)
if err != nil {
fmt.Println("error", err.Error())
return
}
resp, err := writeRead(conn, challenge)
if err != nil {
fmt.Println("error exchanging challenge", err.Error())
return
}
if err := tpm.ValidateChallenge(secret, resp); err != nil {
fmt.Println("error validating challenge", err.Error(), string(resp))
return
}
if !volume.Spec.Quarantined {
secret, err := kclient.CoreV1().Secrets(namespace).Get(ctx, volume.Spec.Passphrase.Name, v1.GetOptions{})
if err == nil {
passphrase := secret.Data[volume.Spec.Passphrase.Path]
writer, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
fmt.Println("error getting websocket writer", err.Error())
return
}
if err := json.NewEncoder(writer).Encode(map[string]string{"passphrase": string(passphrase)}); err != nil {
fmt.Println("error sending passphrase", err.Error())
}
writer.Close() // close the writer to flush the message to the client
}
} else {
conn.Close()
return
}
}
},
)
s.Handler = m
go func() {
if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
panic(err)
}
}()
go func() {
<-ctx.Done()
// ctx is already done at this point; use a fresh context for the shutdown
s.Shutdown(context.Background())
}()
}
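
For reference, a minimal client-side sketch of the handshake above, assuming a challenger reachable at ws://challenger:8082 (the hostname, the token, the "LUKS" label value, and the solveChallenge helper are hypothetical placeholders; a real client would compute the proof with the TPM, e.g. via github.com/kairos-io/go-tpm):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/gorilla/websocket"
)

// solveChallenge stands in for the TPM-side proof computation; a real client
// would derive the proof from the challenge using the TPM.
func solveChallenge(challenge []byte) []byte { return challenge }

func main() {
	header := http.Header{}
	header.Add("Authorization", "<tpm-attestation-token>") // placeholder token
	header.Add("label", "LUKS")                            // label of the SealedVolume to unlock

	conn, _, err := websocket.DefaultDialer.Dial("ws://challenger:8082/challenge", header)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The server speaks first: read the challenge, answer with a proof.
	_, challenge, err := conn.ReadMessage()
	if err != nil {
		panic(err)
	}
	if err := conn.WriteMessage(websocket.BinaryMessage, solveChallenge(challenge)); err != nil {
		panic(err)
	}

	// On success the server replies with the passphrase as JSON.
	_, msg, err := conn.ReadMessage()
	if err != nil {
		panic(err)
	}
	reply := map[string]string{}
	if err := json.Unmarshal(msg, &reply); err != nil {
		panic(err)
	}
	fmt.Println("passphrase:", reply["passphrase"])
}

The message order mirrors the server loop above: challenge from the server, proof from the client, passphrase JSON back.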