Drop tests and cleanup earthly file (#447)

This commit is contained in:
Itxaka
2023-07-14 12:46:18 +02:00
committed by GitHub
parent 2c432078e2
commit 9f56a5578b
19 changed files with 9 additions and 2399 deletions

View File

@@ -4,7 +4,6 @@ on:
push:
branches:
- main
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ github.repository }}
@@ -51,72 +50,14 @@ jobs:
- name: Install Cosign
uses: sigstore/cosign-installer@main
- name: Login to Quay Registry
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
run: echo ${{ secrets.QUAY_PASSWORD }} | docker login -u ${{ secrets.QUAY_USERNAME }} --password-stdin quay.io
- name: Build PR 🔧
if: ${{ github.event_name == 'pull_request' }}
env:
FLAVOR: ${{ matrix.flavor }}
IMAGE: quay.io/kairos/kairos-${{ matrix.flavor }}:latest
run: |
./earthly.sh +ci --IMAGE=$IMAGE --FLAVOR=$FLAVOR
sudo mv build/* .
sudo rm -rf build
- name: Build main 🔧
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
env:
FLAVOR: ${{ matrix.flavor }}
IMAGE: quay.io/kairos/kairos-${{ matrix.flavor }}:latest
run: |
./earthly.sh +all --IMAGE=$IMAGE --FLAVOR=$FLAVOR
sudo mv build/* .
sudo rm -rf build
- uses: actions/upload-artifact@v3
with:
name: kairos-${{ matrix.flavor }}.iso.zip
path: |
*.iso
*.sha256
if-no-files-found: error
- uses: actions/upload-artifact@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
name: kairos-${{ matrix.flavor }}.sbom.zip
path: |
*.syft.json
*.spdx.json
if-no-files-found: error
- uses: actions/upload-artifact@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
name: kairos-${{ matrix.flavor }}.initrd.zip
path: |
*-initrd
if-no-files-found: error
- uses: actions/upload-artifact@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
name: kairos-${{ matrix.flavor }}.squashfs.zip
path: |
*.squashfs
if-no-files-found: error
- uses: actions/upload-artifact@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
name: kairos-${{ matrix.flavor }}.kernel.zip
path: |
*-kernel
*-initrd
if-no-files-found: error
- uses: actions/upload-artifact@v3
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
with:
name: kairos-${{ matrix.flavor }}.ipxe.zip
path: |
*.ipxe
if-no-files-found: error
- name: Push to quay
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
env:
COSIGN_YES: true
IMAGE: "quay.io/kairos/kairos-${{ matrix.flavor }}"
@@ -124,158 +65,3 @@ jobs:
run: |
docker push "$IMAGE:$TAG"
cosign sign $(docker image inspect --format='{{index .RepoDigests 0}}' "$IMAGE:$TAG")
- name: Push to testing
run: |
docker tag quay.io/kairos/kairos-${{ matrix.flavor }}:latest ttl.sh/kairos-${{ matrix.flavor }}-${{ github.sha }}:8h
docker push ttl.sh/kairos-${{ matrix.flavor }}-${{ github.sha }}:8h
decentralized-and-upgrade_k8s:
needs: build
runs-on: self-hosted
strategy:
fail-fast: true
max-parallel: 1
matrix:
flavor: ["opensuse-leap", "alpine-opensuse-leap"]
suite: ["upgrade-k8s", "decentralized-k8s"]
steps:
- uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
with:
name: kairos-${{ matrix.flavor }}.iso.zip
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '^1.20'
- run: |
# install qemu-system-x86_64 and qemu-img
sudo apt update
sudo apt install -y qemu-system-x86 qemu-utils qemu-kvm acl
# Allow the "runner" user to access /dev/kvm
# Might not be the best solution but adding to the kvm group didn't work
# https://askubuntu.com/a/1081326
sudo setfacl -m u:runner:rwx /dev/kvm
- env:
USE_QEMU: true
KVM: true
MEMORY: 4000
CPUS: 2
DRIVE_SIZE: 30000
run: |
ls -liah
export ISO=$PWD/$(ls *.iso)
./.github/run_test.sh ${{ matrix.suite }}
upgrade_latest_k8s:
needs:
- build
runs-on: self-hosted
strategy:
fail-fast: false
matrix:
flavor: ["alpine-opensuse-leap", "opensuse-leap"]
steps:
- uses: actions/checkout@v3
- uses: robinraju/release-downloader@v1.8
with:
# A flag to set the download target as latest release
# The default value is 'false'
latest: true
repository: "kairos-io/provider-kairos"
fileName: "kairos-${{ matrix.flavor }}-*v1.25*.iso"
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '^1.20'
- run: |
# install qemu-system-x86_64 and qemu-img
sudo apt update
sudo apt install -y qemu-system-x86 qemu-utils qemu-kvm acl
# Allow the "runner" user to access /dev/kvm
# Might not be the best solution but adding to the kvm group didn't work
# https://askubuntu.com/a/1081326
sudo setfacl -m u:runner:rwx /dev/kvm
- env:
USE_QEMU: true
KVM: true
run: |
ls -liah
export ISO=$PWD/$(ls kairos-${{ matrix.flavor }}-*.iso | tail -n1 )
export CONTAINER_IMAGE=ttl.sh/kairos-${{ matrix.flavor }}-${{ github.sha }}:8h
./.github/run_test.sh "upgrade-latest-with-kubernetes"
- uses: actions/upload-artifact@v3
if: failure()
with:
name: ${{ matrix.flavor }}-upgrade-test.logs.zip
path: tests/**/logs/*
if-no-files-found: warn
provider_upgrade:
needs:
- build
runs-on: self-hosted
steps:
- uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
with:
name: kairos-opensuse-leap.iso.zip
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '^1.20'
- run: |
# install qemu-system-x86_64 and qemu-img
sudo apt update
sudo apt install -y qemu-system-x86 qemu-utils qemu-kvm acl
# Allow the "runner" user to access /dev/kvm
# Might not be the best solution but adding to the kvm group didn't work
# https://askubuntu.com/a/1081326
sudo setfacl -m u:runner:rwx /dev/kvm
- env:
USE_QEMU: true
KVM: true
run: |
ls -liah
export ISO=$PWD/$(ls *.iso)
./.github/run_test.sh "provider-upgrade"
install_qrcode:
needs:
- build
runs-on: self-hosted
steps:
- uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
with:
name: kairos-opensuse-leap.iso.zip
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '^1.20'
- run: |
# install qemu-system-x86_64 and qemu-img
sudo apt update
sudo apt install -y qemu-system-x86 qemu-utils qemu-kvm acl
# Allow the "runner" user to access /dev/kvm
# Might not be the best solution but adding to the kvm group didn't work
# https://askubuntu.com/a/1081326
sudo setfacl -m u:runner:rwx /dev/kvm
- env:
USE_QEMU: true
KVM: true
run: |
ls -liah
export ISO=$PWD/$(ls *.iso)
./.github/run_test.sh "qrcode-install"
- uses: actions/upload-artifact@v3
if: failure()
with:
name: opensuse-leap-qrcode-test.logs.zip
path: tests/**/logs/*
if-no-files-found: warn

View File

@@ -20,13 +20,16 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '^1.18'
- name: Run Build
run: |
./earthly.sh +dist
go-version-file: go.mod
cache-dependency-path: go.sum
- name: Install earthly
uses: Luet-lab/luet-install-action@v1
with:
repository: quay.io/kairos/packages
packages: utils/earthly
- name: Run tests
run: |
./earthly.sh +test
earthly +test
- name: Codecov
uses: codecov/codecov-action@v3
with:

159
Earthfile
View File

@@ -26,9 +26,6 @@ ARG OSBUILDER_IMAGE=quay.io/kairos/osbuilder-tools:$OSBUILDER_VERSION
## External deps pinned versions
ARG LUET_VERSION=0.33.0
ARG GOLANGCILINT_VERSION=v1.52-alpine
ARG HADOLINT_VERSION=2.12.0-alpine
ARG SHELLCHECK_VERSION=v0.9.0
# renovate: datasource=docker depName=golang
ARG GO_VERSION=1.20
@@ -84,10 +81,9 @@ go-deps:
test:
FROM +go-deps
WORKDIR /build
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
COPY (kairos+luet/luet) /usr/bin/luet
COPY . .
RUN ginkgo run --fail-fast --slow-spec-threshold 30s --covermode=atomic --coverprofile=coverage.out -p -r ./internal
RUN go run github.com/onsi/ginkgo/v2/ginkgo --fail-fast --covermode=atomic --coverprofile=coverage.out -p -r ./internal
SAVE ARTIFACT coverage.out AS LOCAL coverage.out
BUILD_GOLANG:
@@ -127,19 +123,6 @@ version:
ARG VERSION=$(cat VERSION)
SAVE ARTIFACT VERSION VERSION
dist:
ARG GO_VERSION
FROM golang:$GO_VERSION
RUN echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | tee /etc/apt/sources.list.d/goreleaser.list
RUN apt update
RUN apt install -y goreleaser
WORKDIR /build
COPY . .
COPY +version/VERSION ./
RUN echo $(cat VERSION)
RUN VERSION=$(cat VERSION) goreleaser build --rm-dist --skip-validate --snapshot
SAVE ARTIFACT /build/dist/* AS LOCAL dist/
docker:
ARG FLAVOR
ARG VARIANT
@@ -216,12 +199,6 @@ kairos:
RUN git clone https://github.com/kairos-io/kairos /kairos && cd /kairos && git checkout "$KAIROS_VERSION"
SAVE ARTIFACT /kairos/
get-kairos-scripts:
FROM alpine
WORKDIR /build
COPY +kairos/kairos/ ./
SAVE ARTIFACT /build/scripts AS LOCAL scripts
iso:
ARG OSBUILDER_IMAGE
ARG ISO_NAME=${OS_ID}
@@ -329,7 +306,6 @@ ipxe-iso:
SAVE ARTIFACT /build/ipxe/src/bin/ipxe.iso iso AS LOCAL build/${ISO_NAME}-ipxe.iso.ipxe
SAVE ARTIFACT /build/ipxe/src/bin/ipxe.usb usb AS LOCAL build/${ISO_NAME}-ipxe-usb.img.ipxe
## Security targets
trivy:
FROM aquasec/trivy
@@ -340,136 +316,3 @@ trivy-scan:
FROM +docker
COPY +trivy/trivy /trivy
RUN /trivy filesystem --severity $SEVERITY --exit-code 1 --no-progress /
linux-bench:
ARG GO_VERSION
FROM golang:$GO_VERSION
GIT CLONE https://github.com/aquasecurity/linux-bench /linux-bench-src
RUN cd /linux-bench-src && CGO_ENABLED=0 go build -o linux-bench . && mv linux-bench /
SAVE ARTIFACT /linux-bench /linux-bench
# The target below should run on a live host instead.
# However, some checks are relevant as well at container level.
# It is good enough for a quick assessment.
linux-bench-scan:
FROM +docker
GIT CLONE https://github.com/aquasecurity/linux-bench /build/linux-bench
WORKDIR /build/linux-bench
COPY +linux-bench/linux-bench /build/linux-bench/linux-bench
RUN /build/linux-bench/linux-bench
# Generic targets
# usage e.g. ./earthly.sh +datasource-iso --CLOUD_CONFIG=tests/assets/qrcode.yaml
datasource-iso:
ARG OSBUILDER_IMAGE
ARG CLOUD_CONFIG
FROM $OSBUILDER_IMAGE
RUN zypper in -y mkisofs
WORKDIR /build
RUN touch meta-data
COPY ./${CLOUD_CONFIG} user-data
RUN cat user-data
RUN mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
SAVE ARTIFACT /build/ci.iso iso.iso AS LOCAL build/datasource.iso
# usage e.g. ./earthly.sh +run-qemu-tests --FLAVOR=alpine --FROM_ARTIFACTS=true
run-qemu-tests:
FROM opensuse/leap
WORKDIR /test
RUN zypper in -y qemu-x86 qemu-arm qemu-tools go
ARG FLAVOR
ARG TEST_SUITE=autoinstall-test
ARG FROM_ARTIFACTS
ENV FLAVOR=$FLAVOR
ENV SSH_PORT=60022
ENV CREATE_VM=true
ARG CLOUD_CONFIG="/tests/tests/assets/autoinstall.yaml"
ENV USE_QEMU=true
ENV GOPATH="/go"
ENV CLOUD_CONFIG=$CLOUD_CONFIG
IF [ "$FROM_ARTIFACTS" = "true" ]
COPY . .
ENV ISO=/test/build/kairos.iso
ENV DATASOURCE=/test/build/datasource.iso
ELSE
COPY ./tests .
COPY +iso/kairos.iso kairos.iso
COPY ( +datasource-iso/iso.iso --CLOUD_CONFIG=$CLOUD_CONFIG) datasource.iso
ENV ISO=/test/kairos.iso
ENV DATASOURCE=/test/datasource.iso
END
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
ENV CLOUD_INIT=$CLOUD_CONFIG
RUN PATH=$PATH:$GOPATH/bin ginkgo --label-filter "$TEST_SUITE" --fail-fast -r ./tests/
edgevpn:
ARG EDGEVPN_VERSION=latest
FROM quay.io/mudler/edgevpn:$EDGEVPN_VERSION
SAVE ARTIFACT /usr/bin/edgevpn /edgevpn
# usage e.g.
# ./earthly.sh +run-proxmox-tests --PROXMOX_USER=root@pam --PROXMOX_PASS=xxx --PROXMOX_ENDPOINT=https://192.168.1.72:8006/api2/json --PROXMOX_ISO=/test/build/kairos-opensuse-v0.0.0-79fd363-k3s.iso --PROXMOX_NODE=proxmox
run-proxmox-tests:
FROM golang:alpine
WORKDIR /test
RUN apk add xorriso
ARG FLAVOR
ARG TEST_SUITE=proxmox-ha-test
ARG FROM_ARTIFACTS
ARG PROXMOX_USER
ARG PROXMOX_PASS
ARG PROXMOX_ENDPOINT
ARG PROXMOX_STORAGE=local
ARG PROXMOX_ISO
ARG PROXMOX_NODE
ENV GOPATH="/go"
RUN go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
COPY +edgevpn/edgevpn /usr/bin/edgevpn
COPY . .
RUN PATH=$PATH:$GOPATH/bin ginkgo --label-filter "$TEST_SUITE" --fail-fast -r ./tests/e2e/
lint:
BUILD +hadolint
BUILD +renovate-validator
BUILD +shellcheck-lint
BUILD +golangci-lint
BUILD +yamllint
hadolint:
FROM hadolint/hadolint:${HADOLINT_VERSION}
COPY . /work
WORKDIR /work
RUN find . -name "Dockerfile*" -print | xargs -r -n1 hadolint
renovate-validator:
FROM renovate/renovate
COPY . /work
WORKDIR /work
ENV RENOVATE_VERSION="35"
RUN renovate-config-validator
shellcheck-lint:
FROM koalaman/shellcheck-alpine:${SHELLCHECK_VERSION}
COPY . /work
WORKDIR /work
RUN find . -name "*.sh" -print | xargs -r -n1 shellcheck
golangci-lint:
FROM golangci/golangci-lint:${GOLANGCILINT_VERSION}
COPY . /work
WORKDIR /work
RUN golangci-lint run --timeout 360s
yamllint:
FROM cytopia/yamllint
COPY . /work
WORKDIR /work
RUN find . -name "*.yml" -or -name "*.yaml" -print | xargs -r -n1

View File

@@ -1,13 +0,0 @@
#node-config

install:
  auto: true
  device: /dev/vda

stages:
  initramfs:
    - name: "Set user and password"
      users:
        kairos:
          passwd: "kairos"

hostname: kairos-{{ trunc 4 .Random }}

View File

@@ -1,12 +0,0 @@
#cloud-config

install:
  reboot: true

stages:
  initramfs:
    - name: "Set user and password"
      users:
        kairos:
          passwd: "kairos"

hostname: kairos-{{ trunc 4 .Random }}

View File

@@ -1,10 +0,0 @@
#!/bin/sh
# Dump the logs of every pod in every namespace of the local k3s cluster.
# CI debugging aid: run on a node after a failed test to capture workload logs.
# Outer loop: namespace names — squeeze repeated spaces, take column 1, skip the header row.
for n in $(k3s kubectl get namespace -A | tr -s ' ' | cut -f1 -d' ' | tail -n +2); do
# Inner loop: pod names inside namespace $n, extracted the same way.
for p in $(k3s kubectl get pods -n "$n" | tr -s ' ' | cut -f1 -d' ' | tail -n +2); do
echo ---------------------------
echo "$n" - "$p"
echo ---------------------------
k3s kubectl logs "$p" -n "$n"
done
done

View File

@@ -1,9 +0,0 @@
stages:
  initramfs:
    - name: "Set user and password"
      users:
        kairos:
          passwd: "kairos"

hostname: kairos-{{ trunc 4 .Random }}

kairos:
  network_token: "b3RwOgogIGRodDoKICAgIGludGVydmFsOiA5MDAwCiAgICBrZXk6IDI0SUpPS1pGS0g1R0tYUTNSNkdaQkNaS0lPTTNSWU9OT0pNRjIyRFFTM0VNT1BUWEdTTFEKICAgIGxlbmd0aDogMzIKICBjcnlwdG86CiAgICBpbnRlcnZhbDogOTAwMAogICAga2V5OiBQNklPMllEMzZaVFZRTkdUNk5ZREJNS0s0V09aQjJJN0RQMkRUQzRLVVU1UEhaTjRYSzVBCiAgICBsZW5ndGg6IDMyCnJvb206IEdPMkNRVDVYN080VFM3VEpFUVEyTDRXSTJZNURHNzZHUlNGU05FUVVZN1FBSjZSRzVUUFEKcmVuZGV6dm91czoga0taUE1pQU9TZWN6R1lDRXdTSEV0V21XUGZUaGxOa3QKbWRuczogT0FWY25LZXlPT3ZJQlhtQ0ZBYmhNSkxlbndJTEFxY2sKbWF4X21lc3NhZ2Vfc2l6ZTogMjA5NzE1MjAK"

View File

@@ -1,131 +0,0 @@
#cloud-config
stages:
initramfs:
- name: Set user and password
users:
kairos:
passwd: kairos
k3s:
enabled: true
write_files:
- path: /var/lib/rancher/k3s/server/manifests/suc.yaml
permissions: "0644"
content: |
apiVersion: v1
kind: Namespace
metadata:
name: system-upgrade
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: system-upgrade
namespace: system-upgrade
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system-upgrade
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: system-upgrade
namespace: system-upgrade
---
apiVersion: v1
data:
SYSTEM_UPGRADE_CONTROLLER_DEBUG: "false"
SYSTEM_UPGRADE_CONTROLLER_THREADS: "2"
SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: "900"
SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: "99"
SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: Always
SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: rancher/kubectl:v1.21.9
SYSTEM_UPGRADE_JOB_PRIVILEGED: "true"
SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: "900"
SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m
kind: ConfigMap
metadata:
name: default-controller-env
namespace: system-upgrade
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: system-upgrade-controller
namespace: system-upgrade
spec:
selector:
matchLabels:
upgrade.cattle.io/controller: system-upgrade-controller
template:
metadata:
labels:
upgrade.cattle.io/controller: system-upgrade-controller
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- env:
- name: SYSTEM_UPGRADE_CONTROLLER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.labels['upgrade.cattle.io/controller']
- name: SYSTEM_UPGRADE_CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- configMapRef:
name: default-controller-env
image: quay.io/kairos/system-upgrade-controller:v0.9.1
imagePullPolicy: IfNotPresent
name: system-upgrade-controller
volumeMounts:
- mountPath: /etc/ssl
name: etc-ssl
- mountPath: /etc/pki
name: etc-pki
- mountPath: /etc/ca-certificates
name: etc-ca-certificates
- mountPath: /tmp
name: tmp
serviceAccountName: system-upgrade
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/controlplane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoExecute
key: node-role.kubernetes.io/etcd
operator: Exists
volumes:
- hostPath:
path: /etc/ssl
type: Directory
name: etc-ssl
- hostPath:
path: /etc/pki
type: DirectoryOrCreate
name: etc-pki
- hostPath:
path: /etc/ca-certificates
type: DirectoryOrCreate
name: etc-ca-certificates
- emptyDir: {}
name: tmp

View File

@@ -1,21 +0,0 @@
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: os-upgrade
  namespace: system-upgrade
  labels:
    k3s-upgrade: server
spec:
  concurrency: 1
  #version: latest
  version: "opensuse-v1.23.5-44"
  nodeSelector:
    matchExpressions:
      - {key: kubernetes.io/hostname, operator: Exists}
  serviceAccountName: system-upgrade
  cordon: false
  upgrade:
    image: quay.io/kairos/kairos
    command:
      - "/usr/sbin/suc-upgrade"

View File

@@ -1,277 +0,0 @@
// nolint
package mos
import (
"fmt"
"os"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
. "github.com/spectrocloud/peg/matcher"
)
// E2E suite: boots two VMs from the ISO under test, installs Kairos with a
// p2p (decentralized) cloud-config, then verifies — on both nodes — default
// services, grub entries, k3s configuration, kubeconfig, role assignment,
// VPN DNS propagation, and an OS upgrade to a pinned image.
var _ = Describe("kairos decentralized k8s test", Label("decentralized-k8s"), func() {
var vms []VM
var configPath string
// BeforeEach: start two fresh VMs from $ISO, render the shared p2p config,
// and wait until both are reachable over SSH.
BeforeEach(func() {
iso := os.Getenv("ISO")
_, vm1 := startVM(iso)
_, vm2 := startVM(iso)
// FIX: reset the slice instead of appending. With append, VMs from a
// previous spec would accumulate (and already-destroyed VMs would be
// reused) as soon as a second It() is added to this suite.
vms = []VM{vm1, vm2}
configPath = cloudConfig()
vmForEach("waiting until ssh is possible", vms, func(vm VM) {
vm.EventuallyConnects(1200)
})
})
// AfterEach: collect logs on failure, then tear the VMs and config down.
// NOTE(review): CurrentGinkgoTestDescription is deprecated in Ginkgo v2
// (CurrentSpecReport is the replacement) — consider migrating.
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
gatherLogs(vms[0])
}
vmForEach("destroying vm", vms, func(vm VM) {
vm.Destroy(nil)
})
os.RemoveAll(configPath)
})
It("installs to disk with custom config", func() {
// Pre-install: the live system should already run the kairos service.
vmForEach("checking if it has default service active", vms, func(vm VM) {
if isFlavor(vm, "alpine") {
out, _ := vm.Sudo("rc-status")
Expect(out).Should(ContainSubstring("kairos-agent"))
} else {
out, _ := vm.Sudo("systemctl status kairos")
Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/kairos.service; enabled; vendor preset: disabled)"))
}
})
// Install to disk and wait for the reboot into the installed system.
vmForEach("installing", vms, func(vm VM) {
err := vm.Scp(configPath, "/tmp/config.yaml", "0770")
Expect(err).ToNot(HaveOccurred())
out, _ := vm.Sudo("kairos-agent manual-install --device auto /tmp/config.yaml")
Expect(out).Should(ContainSubstring("Running after-install hook"), out)
By("waiting until it reboots to installed system")
Eventually(func() string {
v, _ := vm.Sudo("kairos-agent state get boot")
return strings.TrimSpace(v)
}, 30*time.Minute, 10*time.Second).Should(ContainSubstring("active_boot"))
})
vmForEach("checking default services are on after first boot", vms, func(vm VM) {
if isFlavor(vm, "alpine") {
Eventually(func() string {
out, _ := vm.Sudo("rc-status")
return out
}, 30*time.Second, 10*time.Second).Should(And(
ContainSubstring("kairos-agent")))
} else {
Eventually(func() string {
out, _ := vm.Sudo("systemctl status kairos-agent")
return out
}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
"loaded (/etc/systemd/system/kairos-agent.service; enabled; vendor preset: disabled)"))
Eventually(func() string {
out, _ := vm.Sudo("systemctl status systemd-timesyncd")
return out
}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
"loaded (/usr/lib/systemd/system/systemd-timesyncd.service; enabled; vendor preset: disabled)"))
}
})
// Grub checks only make sense on non-alpine (grub-booted) flavors.
vmForEach("checking if it has correct grub menu entries", vms, func(vm VM) {
if !isFlavor(vm, "alpine") {
state, _ := vm.Sudo("blkid -L COS_STATE")
state = strings.TrimSpace(state)
out, err := vm.Sudo("blkid")
Expect(err).ToNot(HaveOccurred(), out)
out, err = vm.Sudo("mkdir -p /tmp/mnt/STATE")
Expect(err).ToNot(HaveOccurred(), out)
out, err = vm.Sudo("mount " + state + " /tmp/mnt/STATE")
Expect(err).ToNot(HaveOccurred(), out)
out, err = vm.Sudo("cat /tmp/mnt/STATE/grubmenu")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).Should(ContainSubstring("Kairos remote recovery"))
grub, err := vm.Sudo("cat /tmp/mnt/STATE/grub_oem_env")
Expect(err).ToNot(HaveOccurred(), grub)
Expect(grub).Should(ContainSubstring("default_menu_entry=Kairos"))
out, err = vm.Sudo("umount /tmp/mnt/STATE")
Expect(err).ToNot(HaveOccurred(), out)
}
})
vmForEach("checking if k3s was configured", vms, func(vm VM) {
// live_mode must be absent once booted from disk — hence To(HaveOccurred).
out, err := vm.Sudo("cat /run/cos/live_mode")
Expect(err).To(HaveOccurred(), out)
if isFlavor(vm, "alpine") {
// Skip for now as agent doesn't log anymore as it cannot behave both as a one-off and a daemon
/*
Eventually(func() string {
out, _ = vm.Sudo("sudo cat /var/log/kairos/agent.log")
return out
}, 20*time.Minute, 1*time.Second).Should(
Or(
ContainSubstring("Configuring k3s-agent"),
ContainSubstring("Configuring k3s"),
), out)
*/
} else {
Eventually(func() string {
out, _ = vm.Sudo("systemctl status kairos-agent")
return out
}, 30*time.Minute, 1*time.Second).Should(
Or(
ContainSubstring("Configuring k3s-agent"),
ContainSubstring("Configuring k3s"),
), out)
}
})
// Default active/passive images are expected to be 3000MB (3145728000 bytes).
vmForEach("checking if it has default image sizes", vms, func(vm VM) {
for _, p := range []string{"active.img", "passive.img"} {
out, err := vm.Sudo(`stat -c "%s" /run/initramfs/cos-state/cOS/` + p)
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).Should(ContainSubstring("3145728000"))
}
})
vmForEach("checking if it has a working kubeconfig", vms, func(vm VM) {
var out string
Eventually(func() string {
out, _ = vm.Sudo("kairos get-kubeconfig")
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("https:"), out)
Eventually(func() string {
vm.Sudo("kairos get-kubeconfig > kubeconfig")
out, _ = vm.Sudo("KUBECONFIG=kubeconfig kubectl get nodes -o wide")
return out
}, 900*time.Second, 10*time.Second).Should(ContainSubstring("Ready"), out)
})
// With two nodes, exactly one master and one worker must be assigned.
vmForEach("checking roles", vms, func(vm VM) {
var out string
uuid, err := vm.Sudo("kairos-agent uuid")
Expect(err).ToNot(HaveOccurred(), uuid)
Expect(uuid).ToNot(Equal(""))
Eventually(func() string {
out, _ = vm.Sudo("kairos role list")
return out
}, 900*time.Second, 10*time.Second).Should(And(
ContainSubstring(uuid),
ContainSubstring("worker"),
ContainSubstring("master"),
HaveMinMaxRole("master", 1, 1),
HaveMinMaxRole("worker", 1, 1),
), out)
})
vmForEach("checking if it has machines with different IPs", vms, func(vm VM) {
var out string
Eventually(func() string {
out, _ = vm.Sudo(`curl http://localhost:8080/api/machines`)
return out
}, 900*time.Second, 10*time.Second).Should(And(
ContainSubstring("10.1.0.1"),
ContainSubstring("10.1.0.2"),
), out)
})
vmForEach("checking if it can propagate dns and it is functional", vms, func(vm VM) {
if !isFlavor(vm, "alpine") {
// FIXUP: DNS needs reboot to take effect
vm.Reboot(1200)
out := ""
Eventually(func() string {
var err error
out, err = vm.Sudo(`curl -X POST http://localhost:8080/api/dns --header "Content-Type: application/json" -d '{ "Regex": "foo.bar", "Records": { "A": "2.2.2.2" } }'`)
Expect(err).ToNot(HaveOccurred(), out)
out, _ = vm.Sudo("dig +short foo.bar")
return strings.TrimSpace(out)
}, 900*time.Second, 10*time.Second).Should(Equal("2.2.2.2"), out)
Eventually(func() string {
out, _ = vm.Sudo("dig +short google.com")
return strings.TrimSpace(out)
}, 900*time.Second, 10*time.Second).ShouldNot(BeEmpty(), out)
}
})
vmForEach("checking if it upgrades to a specific version", vms, func(vm VM) {
version, err := vm.Sudo("source /etc/os-release; echo $VERSION")
Expect(err).ToNot(HaveOccurred(), version)
out, err := vm.Sudo("kairos-agent upgrade --image quay.io/kairos/kairos-opensuse:v1.0.0-rc2-k3sv1.21.14-k3s1")
Expect(err).ToNot(HaveOccurred(), out)
Expect(out).To(ContainSubstring("Upgrade completed"))
out, err = vm.Sudo("sync")
Expect(err).ToNot(HaveOccurred(), out)
By("rebooting to the upgraded system")
vm.Reboot(1200)
version2, err := vm.Sudo(getVersionCmd)
Expect(err).ToNot(HaveOccurred(), version2)
Expect(version).ToNot(Equal(version2))
})
})
})
// HaveMinMaxRole returns a Gomega matcher that succeeds when the actual
// value is a string containing between min and max (inclusive) occurrences
// of the substring name.
func HaveMinMaxRole(name string, min, max int) types.GomegaMatcher {
	countRole := func(actual interface{}) (int, error) {
		s, ok := actual.(string)
		if !ok {
			return 0, fmt.Errorf("HaveRoles expects a string, but got %T", actual)
		}
		return strings.Count(s, name), nil
	}
	bounds := SatisfyAll(
		BeNumerically(">=", min),
		BeNumerically("<=", max),
	)
	return WithTransform(countRole, bounds)
}
// vmForEach runs action against every VM in vms, wrapping each run in a
// Ginkgo By() step labeled with description plus the 1-based VM index.
func vmForEach(description string, vms []VM, action func(vm VM)) {
	for i := range vms {
		label := fmt.Sprintf("%s [%s]", description, strconv.Itoa(i+1))
		By(label)
		action(vms[i])
	}
}
// cloudConfig renders the cloud-config used by the decentralized suite:
// it generates a fresh p2p network token, appends a p2p stanza (with DNS
// enabled) to the contents of assets/config.yaml, writes the result to a
// temp file, and returns that file's path. Callers are responsible for
// removing the file (AfterEach does so via os.RemoveAll).
// NOTE(review): the p2p block below is inside a raw string literal; its
// exact indentation may have been flattened in this diff view — confirm
// against a rendered config before relying on the nesting shown here.
func cloudConfig() string {
token, err := kairosCli("generate-token")
Expect(err).ToNot(HaveOccurred())
configBytes, err := os.ReadFile("assets/config.yaml")
Expect(err).ToNot(HaveOccurred())
config := fmt.Sprintf(`%s
p2p:
network_token: %s
dns: true
`, string(configBytes), token)
f, err := os.CreateTemp("", "kairos-config-*.yaml")
Expect(err).ToNot(HaveOccurred())
// Close error intentionally ignored; the write below is checked.
defer f.Close()
_, err = f.WriteString(config)
Expect(err).ToNot(HaveOccurred())
return f.Name()
}

View File

@@ -1,73 +0,0 @@
package mos
import (
"net"
"time"
"golang.org/x/crypto/ssh"
)
type Conn struct {
net.Conn
ReadTimeout time.Duration
WriteTimeout time.Duration
}
func (c *Conn) Read(b []byte) (int, error) {
err := c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout))
if err != nil {
return 0, err
}
return c.Conn.Read(b)
}
func (c *Conn) Write(b []byte) (int, error) {
err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout))
if err != nil {
return 0, err
}
return c.Conn.Write(b)
}
// SSHDialTimeout dials an SSH connection with a timeout. The same timeout
// bounds the TCP dial and — via the Conn wrapper — every subsequent read
// and write, and a background goroutine sends periodic keepalives so that
// a silently-dead remote host cannot make the client sit forever in Wait().
func SSHDialTimeout(network, addr string, config *ssh.ClientConfig, timeout time.Duration) (*ssh.Client, error) {
conn, err := net.DialTimeout(network, addr, timeout)
if err != nil {
return nil, err
}
// Wrap so each Read/Write re-arms a deadline of `timeout`.
timeoutConn := &Conn{conn, timeout, timeout}
c, chans, reqs, err := ssh.NewClientConn(timeoutConn, addr, config)
if err != nil {
return nil, err
}
client := ssh.NewClient(c, chans, reqs)
// this sends keepalive packets every 2 seconds
// there's no useful response from these, so we can just abort if there's an error
// (the goroutine exits on the first SendRequest failure, i.e. when the
// connection is gone; it is not otherwise cancellable).
go func() {
t := time.NewTicker(2 * time.Second)
defer t.Stop()
for range t.C {
_, _, err := client.Conn.SendRequest("keepalive@golang.org", true, nil)
if err != nil {
return
}
}
}()
return client, nil
}
// SSHConn holds the credentials and target host used for password-based
// SSH command execution (see Command).
type SSHConn struct {
username string
password string
host string
}
// NewSSH builds an SSHConn for the given user, password and target host.
func NewSSH(user, pass, host string) *SSHConn {
	// Keyed composite literal: robust against future reordering or
	// additions of SSHConn's fields (the positional form silently breaks).
	return &SSHConn{username: user, password: pass, host: host}
}
// Command runs cmd on the remote host using the stored credentials, by
// delegating to SSHCommand.
// NOTE(review): SSHCommand is defined elsewhere in the package; whether
// `out` is stdout only or combined output is not visible here — confirm.
func (s *SSHConn) Command(cmd string) (out string, err error) {
return SSHCommand(s.username, s.password, s.host, cmd)
}

View File

@@ -1,235 +0,0 @@
package mos
import (
"fmt"
"strings"
"time"
"github.com/luthermonson/go-proxmox"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
// genConfig renders the #cloud-config document used to boot a proxmox HA
// test node. Substitutions, in template order: rsa (authorized SSH public
// key), kubeVIP (KubeVIP elastic IP), kubevip (enable KubeVIP), mdns (the
// value written to p2p.disable_dht — true disables DHT so only mDNS
// discovery remains), vpnCreate / vpnUSE (p2p.vpn.create / p2p.vpn.use),
// token (p2p network_token). The function's parameter order differs from
// the substitution order — the Sprintf argument list below is the
// authority.
// NOTE(review): the embedded YAML's indentation appears flattened in this
// diff view; confirm the nesting against a rendered config. No comments
// are added inside the raw string, since they would become runtime data.
func genConfig(kubeVIP string, rsa string, token string, mdns, vpnCreate, vpnUSE, kubevip bool) string {
return fmt.Sprintf(`#cloud-config
install:
auto: true
device: "auto"
reboot: true
hostname: "test-{{ trunc 4 .MachineID }}"
users:
- name: "kairos"
passwd: "kairos"
ssh_authorized_keys:
- "%s"
## Sets the Elastic IP for KubeVIP
kubevip:
eip: "%s"
enable: %t
p2p:
disable_dht: %t # Enabled by default
vpn:
create: %t # defaults to true
use: %t # defaults to true
auto:
# Auto enables automatic master/worker role assignment
enable: true
# HA enables automatic HA roles assignment.
# A master cluster init is always required,
# Any additional master_node is configured as part of the
# HA control plane.
# If auto is disabled, HA has no effect.
ha:
# Enables HA
enable: true
# number of HA/master node (beside the one used for init)
master_nodes: 2
network_token: "%s"`, rsa, kubeVIP, kubevip, mdns, vpnCreate, vpnUSE, token)
}
// startVMS uploads cloudConfig as a cloud-init ISO to the proxmox node's
// "local" storage, then creates and starts `number` VMs that boot from the
// Kairos ISO with that cloud-init ISO attached. It returns the created VM
// IDs so the caller (AfterEach) can stop and delete them.
// NOTE(review): storage name "local" is hard-coded here even though the
// Earthfile exposes a PROXMOX_STORAGE argument — confirm intentional.
func startVMS(cloudConfig []byte, number int) (VMIDS []int) {
// Random ISO name to avoid collisions between concurrent/successive runs.
r := RandStringRunes(5)
isoTestName := fmt.Sprintf("%s.iso", r)
node, client, err := getNode()
Expect(err).ToNot(HaveOccurred())
storage, err := node.Storage("local")
Expect(err).ToNot(HaveOccurred())
err = uploadCloudInitISO(
isoTestName,
cloudConfig, storage,
)
Expect(err).ToNot(HaveOccurred())
cluster, err := client.Cluster()
Expect(err).ToNot(HaveOccurred())
for i := 0; i < number; i++ {
nextid, err := cluster.NextID()
Expect(err).ToNot(HaveOccurred())
// See: https://pve.proxmox.com/pve-docs/api-viewer/index.html#/nodes/{node}/qemu
// Boot order: disk first, then the Kairos ISO (ide0), then network.
t, err := node.NewVirtualMachine(nextid,
proxmox.VirtualMachineOption{Name: "serial0", Value: "socket"},
proxmox.VirtualMachineOption{Name: "memory", Value: "2048"},
proxmox.VirtualMachineOption{Name: "cores", Value: "1"},
proxmox.VirtualMachineOption{Name: "boot", Value: "order=scsi0;ide0;net0"},
proxmox.VirtualMachineOption{Name: "net0", Value: "virtio,bridge=vmbr0,firewall=1"},
proxmox.VirtualMachineOption{Name: "scsi0", Value: "local-lvm:60,size=60G"},
proxmox.VirtualMachineOption{Name: "ide0", Value: locateISOFmt(isoName())},
proxmox.VirtualMachineOption{Name: "ide1", Value: locateISOFmt(isoTestName)},
)
waitTask(t, err)
v, _ := node.VirtualMachine(nextid)
t, err = v.Start()
waitTask(t, err)
VMIDS = append(VMIDS, nextid)
}
return
}
// Proxmox HA suite: drives a remote Proxmox cluster (via ControlVM, a
// helper machine reachable over SSH) to boot 4 Kairos nodes and verifies
// that an EmbeddedDB HA control plane forms, with mDNS-only and with DHT
// discovery respectively.
// NOTE(review): the Describe text duplicates the decentralized suite's
// ("kairos decentralized k8s test") — consider renaming for report clarity.
var _ = Describe("kairos decentralized k8s test", Label("proxmox-ha-test"), func() {
// IDs of VMs created during a spec; AfterEach stops and deletes them.
VMIDS := []int{}
// ControlVM info that we need during the test run
var dev, ip, pubkey string
// BeforeEach: discover the ControlVM's physical NIC, its IPv4 address,
// and its SSH public key (injected into the nodes' cloud-config).
BeforeEach(func() {
var err error
dev, err = ControlVM.Command(`ls -l /sys/class/net/ | grep -v virtual | cut -d " " -f9`)
Expect(err).ToNot(HaveOccurred())
fmt.Println("Found ControlVM dev at", dev)
ip, err = ControlVM.Command(fmt.Sprintf(`ip a s %s | awk -F'[/ ]+' '/inet[^6]/{print $3}'`, strings.TrimSpace(dev)))
Expect(err).ToNot(HaveOccurred(), ip)
fmt.Println("Found ControlVM IP at", ip)
pubkey, err = ControlVM.Command(`cat .ssh/id_rsa.pub`)
Expect(err).ToNot(HaveOccurred())
pubkey = strings.TrimSpace(pubkey)
fmt.Println("Found ControlVM pubkey ", pubkey)
})
// AfterEach: stop and delete every VM created by the spec, then reset.
AfterEach(func() {
node, _, err := getNode()
Expect(err).ToNot(HaveOccurred())
for _, vm := range VMIDS {
v, err := node.VirtualMachine(vm)
Expect(err).ToNot(HaveOccurred())
t, err := v.Stop()
waitTask(t, err)
t, err = v.Delete()
waitTask(t, err)
}
VMIDS = []int{}
if CurrentGinkgoTestDescription().Failed {
// gatherLogs()
}
})
Context("HA", func() {
It("setups automatically an EmbeddedDB cluster with mdns", func() {
// Scan the ControlVM's /24 for an unused address to use as KubeVIP EIP.
// NOTE(review): the probe script uses [[ ... ]] — bash-only; confirm
// the remote shell is bash, not plain sh.
freeIP, err := ControlVM.Command(fmt.Sprintf(`my_net=%s
for i in $(seq 1 254);
do
ip="$my_net.$i"
ping -c2 $ip | grep -q "Unreachable"; [[ "$?" == "0" ]] && echo $ip && break
done`, strings.Join(strings.Split(ip, ".")[0:3], ".")))
Expect(err).ToNot(HaveOccurred(), freeIP)
freeIP = strings.TrimSpace(freeIP)
fmt.Println("Found Free IP at", freeIP)
networkToken, err := genToken()
Expect(err).ToNot(HaveOccurred())
// mdns=true → disable_dht, no VPN, KubeVIP enabled. 4 nodes total.
VMIDS = append(VMIDS, startVMS([]byte(genConfig(freeIP, pubkey, networkToken, true, false, false, true)), 4)...)
By("Waiting for HA control-plane to be available", func() {
ping(freeIP, ControlVM)
})
// Expect exactly 1 clusterinit master and 2 HA masters among 4 nodes.
Eventually(func() string {
out, err := ControlVM.Command(fmt.Sprintf("ssh -oStrictHostKeyChecking=no kairos@%s kairos role list", freeIP))
if err != nil {
fmt.Println(err, out)
}
return out
}, time.Duration(time.Duration(650)*time.Second), time.Duration(30*time.Second)).Should(And(
ContainSubstring("worker"),
ContainSubstring("master/ha"),
ContainSubstring("master/clusterinit"),
HaveMinMaxRole("master/clusterinit", 1, 1),
HaveMinMaxRole("master/ha", 2, 2),
))
out, err := ControlVM.Command(fmt.Sprintf("ssh -oStrictHostKeyChecking=no kairos@%s sudo cat /etc/systemd/system.conf.d/edgevpn-kairos.env", freeIP))
Expect(err).ToNot(HaveOccurred())
Expect(out).To(ContainSubstring(`EDGEVPNDHT="false"`))
})
It("setups automatically an EmbeddedDB cluster with dht", func() {
out, err := ControlVM.Command("sudo cat /etc/os-release")
Expect(err).ToNot(HaveOccurred(), out)
if strings.Contains(out, "alpine") {
Skip("test assumes systemd on the nodes")
}
networkToken, err := genToken()
Expect(err).ToNot(HaveOccurred())
// DHT enabled, VPN created and used, no KubeVIP. 4 nodes total.
VMIDS = append(VMIDS, startVMS([]byte(genConfig("", pubkey, networkToken, false, true, true, false)), 4)...)
startVPN(networkToken, ControlVM)
defer stopVPN(ControlVM)
//
// 10.1.0.1 will be our IP, and DHCP will assign then 10.1.0.2 to one of the nodes of the cluster.
By("Waiting for HA control-plane to be available", func() {
ping("10.1.0.2", ControlVM)
})
Eventually(func() string {
out, err := ControlVM.Command(fmt.Sprintf("ssh -oStrictHostKeyChecking=no kairos@%s kairos role list", "10.1.0.2"))
if err != nil {
fmt.Println(err, out)
}
return out
}, time.Duration(time.Duration(650)*time.Second), time.Duration(30*time.Second)).Should(And(
ContainSubstring("worker"),
ContainSubstring("master/ha"),
ContainSubstring("master/clusterinit"),
HaveMinMaxRole("master/clusterinit", 1, 1),
HaveMinMaxRole("master/ha", 2, 2),
))
out, err = ControlVM.Command(fmt.Sprintf("ssh -oStrictHostKeyChecking=no kairos@%s sudo cat /etc/systemd/system.conf.d/edgevpn-kairos.env", "10.1.0.2"))
Expect(err).ToNot(HaveOccurred())
Expect(out).ToNot(ContainSubstring(`EDGEVPNDHT="false"`))
})
})
})
// HaveMinMaxRole returns a Gomega matcher asserting that the substring name
// occurs between min and max times (inclusive) in the matched string.
// Non-string actuals fail the match with a descriptive error.
func HaveMinMaxRole(name string, min, max int) types.GomegaMatcher {
	countOccurrences := func(actual interface{}) (int, error) {
		text, ok := actual.(string)
		if !ok {
			return 0, fmt.Errorf("HaveRoles expects a string, but got %T", actual)
		}
		return strings.Count(text, name), nil
	}
	return WithTransform(countOccurrences,
		SatisfyAll(
			BeNumerically(">=", min),
			BeNumerically("<=", max),
		))
}

View File

@@ -1,245 +0,0 @@
package mos
import (
"crypto/tls"
"fmt"
"math/rand"
"net/http"
"os"
"path/filepath"
"time"
. "github.com/onsi/gomega"
"github.com/bramvdbogaerde/go-scp"
"github.com/kairos-io/kairos-sdk/utils"
"golang.org/x/crypto/ssh"
"github.com/luthermonson/go-proxmox"
)
// randGen is the package-local pseudo-random source used by
// RandStringRunes, seeded once from the wall clock at package
// initialization (a var initializer runs before any init func or test).
var randGen = rand.New(rand.NewSource(time.Now().UnixNano()))
// letterRunes is the alphabet RandStringRunes draws from: ASCII letters only.
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// RandStringRunes returns a random string of n letters taken from
// letterRunes using the package-level randGen source.
func RandStringRunes(n int) string {
	result := make([]rune, 0, n)
	for i := 0; i < n; i++ {
		result = append(result, letterRunes[randGen.Intn(len(letterRunes))])
	}
	return string(result)
}
// datasourceISO renders the cloud-config cc into a NoCloud "cidata" ISO at
// output, using mkisofs in a throwaway temp directory.
// Returns an error if the temp dir, the seed files, or the mkisofs
// invocation fail.
func datasourceISO(cc []byte, output string) error {
	temp, err := os.MkdirTemp("", "datasource")
	if err != nil {
		return err
	}
	defer os.RemoveAll(temp)

	// meta-data must exist (even empty) for the NoCloud datasource.
	// FIX: these WriteFile errors were previously ignored, which could
	// produce an ISO missing its seed files.
	if err := os.WriteFile(filepath.Join(temp, "meta-data"), []byte{}, os.ModePerm); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(temp, "user-data"), cc, os.ModePerm); err != nil {
		return err
	}

	out, err := utils.SH(fmt.Sprintf("cd %s && mkisofs -output %s -volid cidata -joliet -rock user-data meta-data", temp, output))
	if err != nil {
		return fmt.Errorf("failed %s: %w", out, err)
	}
	return nil
}
// use as:
// node, err := getNode()
// storage, err := node.Storage("local")
// if err != nil {
// panic(err)
// }
// uploadCloudInitISO(
// "foo.iso",
// []byte(`#cloud-config
// install:
// auto: true
// device: "auto"
// reboot: true
// k3s:
// enable: true
// `), storage,
//
// )

// uploadCloudInitISO renders cc into a cidata ISO named isoname and uploads
// it to the given Proxmox storage, waiting up to 300 seconds for the upload
// task to complete.
func uploadCloudInitISO(isoname string, cc []byte, storage *proxmox.Storage) error {
	temp, err := os.MkdirTemp("", "datasource")
	if err != nil {
		return err
	}
	defer os.RemoveAll(temp)

	isoPath := filepath.Join(temp, isoname)
	if err := datasourceISO(cc, isoPath); err != nil {
		return err
	}

	uploadTask, err := storage.Upload("iso", isoPath)
	if err != nil {
		return err
	}
	return uploadTask.WaitFor(300)
}
// NewSCPClient builds an SCP client for host, authenticated with the given
// user/password pair and a 10-second operation timeout.
func NewSCPClient(user, pass, host string) scp.Client {
	return scp.NewClientWithTimeout(host, sshConfig(user, pass), 10*time.Second)
}
// SSHCommand runs cmd on host over SSH and returns its combined
// stdout/stderr output together with any execution error.
func SSHCommand(user, pass, host, cmd string) (string, error) {
	client, session, err := NewClient(user, pass, host)
	if err != nil {
		return "", err
	}
	defer client.Close()
	// FIX: the session was never closed explicitly (only implicitly via the
	// client); close it deterministically.
	defer session.Close()

	// FIX: the original had two identical return branches; collapsed into one.
	out, err := session.CombinedOutput(cmd)
	return string(out), err
}
// NewClient returns a new ssh client associated to a machine, plus an open
// session on it. On success the caller owns both and must close the client
// (which also tears down the session).
func NewClient(user, pass, host string) (*ssh.Client, *ssh.Session, error) {
	sshConfig := sshConfig(user, pass)
	client, err := SSHDialTimeout("tcp", host, sshConfig, 30*time.Second)
	if err != nil {
		return nil, nil, err
	}
	session, err := client.NewSession()
	if err != nil {
		// Don't leak the connection when session creation fails.
		client.Close()
		return nil, nil, err
	}
	return client, session, nil
}
// sshConfig returns a password-authenticated ClientConfig that skips host
// key verification — acceptable only because these are throwaway test VMs.
func sshConfig(user, pass string) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.Password(pass)},
		Timeout:         30 * time.Second, // max time to establish connection
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
}
// getNode connects to the Proxmox API (endpoint/credentials from the
// PROXMOX_ENDPOINT, PROXMOX_USER, PROXMOX_PASS env vars, TLS verification
// disabled for self-signed test clusters) and returns the node named by
// PROXMOX_NODE together with the client.
func getNode() (*proxmox.Node, *proxmox.Client, error) {
	insecureHTTPClient := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true, // test clusters use self-signed certs
			},
		},
	}

	client := proxmox.NewClient(os.Getenv("PROXMOX_ENDPOINT"),
		proxmox.WithClient(&insecureHTTPClient),
		//proxmox.WithAPIToken(tokenID, secret),
		proxmox.WithLogins(os.Getenv("PROXMOX_USER"), os.Getenv("PROXMOX_PASS")),
	)

	version, err := client.Version()
	if err != nil {
		// FIX: previously panicked here even though the function returns an
		// error; return it so callers can Fail() gracefully instead.
		return nil, nil, err
	}
	fmt.Println(version.Release)

	// Debug: list all nodes visible through this client.
	statuses, err := client.Nodes()
	if err != nil {
		return nil, nil, err
	}
	for _, st := range statuses {
		fmt.Println(st.Node)
	}

	node, err := client.Node(os.Getenv("PROXMOX_NODE"))
	return node, client, err
}
// EventuallyConnects asserts that an SSH "echo ping" round-trip to host
// succeeds within t[0] seconds (default 360), polling every 30 seconds.
func EventuallyConnects(user, pass, host string, t ...int) {
	dur := 360
	if len(t) > 0 {
		dur = t[0]
	}
	EventuallyWithOffset(1, func() string {
		out, err := SSHCommand(user, pass, host, "echo ping")
		if err != nil {
			fmt.Println(err)
		}
		return out
	// FIX: time.Duration(time.Duration(dur)*time.Second) double-converted an
	// already-Duration value; the simplified form is identical in value.
	}, time.Duration(dur)*time.Second, 30*time.Second).Should(Equal("ping\n"))
}
// pixiecore:
// docker run -d --name pixiecore --net=host -v $PWD:/files quay.io/pixiecore/pixiecore boot /files/kairos-opensuse-${VERSION}-kernel /files/kairos-opensuse-${VERSION}-initrd --cmdline="rd.neednet=1 ip=dhcp rd.cos.disable root=live:{{ ID \"/files/kairos-opensuse-${VERSION}.squashfs\" }} netboot nodepair.enable config_url={{ ID \"/files/config.yaml\" }} console=tty1 console=ttyS0 console=tty0"

// stopVPN tears down the transient "vpn" systemd unit created by startVPN
// and removes its unit file and launcher script from the control VM.
func stopVPN(ControlVM *SSHConn) {
	cleanupCmd := "sudo /bin/bash -c 'systemctl stop vpn && rm -rf /etc/systemd/system/vpn.service && systemctl daemon-reload && rm -rf /usr/local/vpn.sh'"
	out, err := ControlVM.Command(cleanupCmd)
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	fmt.Println(out)
}
// startVPN joins the control VM to the edgevpn network identified by
// networkToken by installing and starting a transient systemd "vpn" unit.
// NOTE: This requires systemd on the control VM; stopVPN undoes it.
func startVPN(networkToken string, ControlVM *SSHConn) {
	// The tun module must be loaded for edgevpn to create its interface.
	out, err := ControlVM.Command("sudo modprobe tun")
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	// NOTE: This requires systemd
	// Get the controlVM on the same VPN so it can reach the cluster
	out, err = ControlVM.Command(fmt.Sprintf(`cat << EOF > /tmp/vpn.sh
#!/bin/bash
EDGEVPNTOKEN=%s sudo -E edgevpn --log-level debug
EOF`, networkToken))
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	// Install the launcher script where the unit file expects it.
	out, err = ControlVM.Command("sudo mv /tmp/vpn.sh /usr/local/vpn.sh && sudo chmod +x /usr/local/vpn.sh")
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	// Write a simple always-restart unit wrapping the launcher script.
	out, err = ControlVM.Command(`cat << EOF > /tmp/vpn.service
[Unit]
Description=vpn
[Service]
Type=simple
Restart=always
RestartSec=1
ExecStart=/usr/local/vpn.sh
[Install]
WantedBy=multi-user.target
EOF`)
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	out, err = ControlVM.Command("sudo /bin/bash -c 'mv /tmp/vpn.service /etc/systemd/system/vpn.service && systemctl daemon-reload && systemctl start vpn'")
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), out)
	fmt.Println(out)
}
// ping blocks until ip answers all 3 ICMP echoes sent from the control VM,
// polling every 30 seconds for up to 650 seconds.
func ping(ip string, ControlVM *SSHConn) {
	EventuallyWithOffset(1, func() string {
		out, err := ControlVM.Command(fmt.Sprintf("ping %s -c 3", ip))
		if err != nil {
			fmt.Println(err)
		}
		return out
	// FIX: removed the redundant nested time.Duration conversions; the
	// simplified expressions have identical values.
	}, 650*time.Second, 30*time.Second).Should(ContainSubstring("3 received"))
}

View File

@@ -1,189 +0,0 @@
package mos
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/kairos-io/kairos-sdk/utils"
"github.com/luthermonson/go-proxmox"
process "github.com/mudler/go-processmanager"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestSuite is the `go test` entry point wiring Ginkgo's fail handler into
// the standard testing package and running the proxmox E2E specs.
func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "kairos E2E proxmox Test Suite")
}
// genToken generates a fresh edgevpn network token by invoking the locally
// installed edgevpn binary (-b -g) and returns its stdout.
func genToken() (string, error) {
	return utils.SH("/usr/bin/edgevpn -b -g")
}
// ControlVM is the SSH connection to the control/router VM, established in
// BeforeSuite and shared by all specs in this suite.
var ControlVM *SSHConn
// waitTask asserts that a Proxmox task was created (err nil, t non-nil) and
// waits for it to finish (1s poll interval, 10s timeout), failing the spec
// on any error.
func waitTask(t *proxmox.Task, err error) {
	Expect(err).ToNot(HaveOccurred())
	Expect(t).ToNot(BeNil())
	err = t.Wait(1*time.Second, 10*time.Second)
	Expect(err).ToNot(HaveOccurred())
}
// deleteVMs stops and deletes every VM on the given Proxmox node, awaiting
// and asserting each stop/delete task via waitTask. A JSON dump of each VM
// is printed first for debugging.
func deleteVMs(node *proxmox.Node) {
	fmt.Println("delete all")
	vms, err := node.VirtualMachines()
	if err != nil {
		Fail(err.Error())
	}
	// Delete all VMs running
	for _, vm := range vms {
		// FIX: the error from VirtualMachine() was previously ignored and the
		// possibly-invalid result printed anyway; only dump it on success.
		if v, err := node.VirtualMachine(int(vm.VMID)); err == nil {
			fmt.Println(v)
			d, _ := json.Marshal(v)
			fmt.Println(string(d))
		}
		fmt.Printf("Deleting %s\n", vm.Name)
		t, err := vm.Stop()
		waitTask(t, err)
		t, err = vm.Delete()
		waitTask(t, err)
	}
}
// AfterSuite removes every VM left on the node so runs don't leak machines.
var _ = AfterSuite(func() {
	// FIX: the error from getNode was previously discarded; a failed
	// connection handed a nil node to deleteVMs and panicked there.
	node, _, err := getNode()
	if err != nil {
		Fail(err.Error())
	}
	deleteVMs(node)
})
// accessors
func proxmoxISO() string {
return os.Getenv("PROXMOX_ISO")
}
// isoName returns just the file-name component of the configured OS ISO path.
func isoName() string {
	fullPath := proxmoxISO()
	return filepath.Base(fullPath)
}
func locateISOFmt(s string) string {
return fmt.Sprintf("local:iso/%s,media=cdrom", s)
}
// BeforeSuite provisions the control/router VM on Proxmox: uploads the OS
// ISO and a generated cloud-init ISO, wipes any leftover VMs, creates and
// starts the VM, tunnels its SSH port to 127.0.0.1:9090 via edgevpn, and
// waits until the installed system reports active_boot.
var _ = BeforeSuite(func() {
	// We need to setup the router VM where we will connect to
	// First upload the ISOs needed (OS and cidata)
	node, client, err := getNode()
	if err != nil {
		Fail(err.Error())
	}
	storage, err := node.Storage(os.Getenv("PROXMOX_STORAGE"))
	if err != nil {
		Fail(err.Error())
	}
	temp, err := os.MkdirTemp("", "datasource")
	if err != nil {
		Fail(err.Error())
	}
	defer os.RemoveAll(temp)
	token, err := genToken()
	if err != nil {
		Fail(err.Error() + token)
	}
	fmt.Println("start vpn")
	fmt.Println("Upload cloud init")
	// Cloud-init: auto-install with k3s, and expose SSH over edgevpn so the
	// test host can reach the VM without direct network access.
	// NOTE(review): indentation inside this embedded cloud-config may have
	// been lost in extraction — verify against git history.
	err = uploadCloudInitISO(
		"control.iso",
		[]byte(fmt.Sprintf(`#cloud-config
stages:
boot.after:
- commands:
- EDGEVPNTOKEN=%s edgevpn service-add "ssh" "127.0.0.1:22"
users:
- name: "kairos"
passwd: "kairos"
install:
auto: true
device: "auto"
reboot: true
k3s:
enable: true`, token)), storage,
	)
	if err != nil {
		Fail(err.Error())
	}
	// Upload the OS ISO only when it's not already present in the storage.
	iso, err := storage.ISO(isoName())
	if err != nil || iso == nil {
		fmt.Println("Upload target iso")
		tup, err := storage.Upload("iso", proxmoxISO())
		if err != nil {
			Fail(err.Error())
		}
		if err := tup.WaitFor(300); err != nil {
			Fail(err.Error())
		}
	}
	deleteVMs(node)
	// Create control VM and wait for it to be available
	cluster, err := client.Cluster()
	Expect(err).ToNot(HaveOccurred())
	nextid, err := cluster.NextID()
	Expect(err).ToNot(HaveOccurred())
	fmt.Println("Next ID", nextid)
	// See: https://pve.proxmox.com/pve-docs/api-viewer/index.html#/nodes/{node}/qemu
	t, err := node.NewVirtualMachine(nextid,
		proxmox.VirtualMachineOption{Name: "serial0", Value: "socket"},
		proxmox.VirtualMachineOption{Name: "memory", Value: "2048"},
		proxmox.VirtualMachineOption{Name: "cores", Value: "1"},
		proxmox.VirtualMachineOption{Name: "boot", Value: "order=scsi0;ide0;net0"},
		proxmox.VirtualMachineOption{Name: "net0", Value: "virtio,bridge=vmbr0,firewall=1"},
		proxmox.VirtualMachineOption{Name: "scsi0", Value: "local-lvm:60,size=60G"},
		proxmox.VirtualMachineOption{Name: "ide0", Value: locateISOFmt(isoName())},
		proxmox.VirtualMachineOption{Name: "ide1", Value: locateISOFmt("control.iso")},
	)
	Expect(err).ToNot(HaveOccurred())
	Expect(t).ToNot(BeNil())
	err = t.Wait(1*time.Second, 10*time.Second)
	Expect(err).ToNot(HaveOccurred())
	// NOTE(review): Start's return values are intentionally(?) ignored here;
	// a failed start surfaces later via the Eventually below.
	v, _ := node.VirtualMachine(nextid)
	v.Start()
	// Forward the VM's edgevpn-exposed SSH service to local port 9090.
	edgevpn := process.New(
		process.WithEnvironment(fmt.Sprintf("EDGEVPNTOKEN=%s", token)),
		process.WithName("/usr/bin/edgevpn"), process.WithArgs("service-connect", "--log-level", "debug", "ssh", "0.0.0.0:9090"), process.WithStateDir(temp))
	err = edgevpn.Run()
	// fmt.Println("start vpn", err)
	//defer edgevpn.Stop()
	ControlVM = NewSSH("kairos", "kairos", "127.0.0.1:9090")
	// Wait (up to ~15 minutes) for auto-install + reboot to finish.
	Eventually(func() string {
		out, err := ControlVM.Command("kairos-agent state get boot")
		if err != nil {
			fmt.Println(err)
		}
		return out
	}, time.Duration(time.Duration(920)*time.Second), time.Duration(30*time.Second)).Should(Equal("active_boot"))
	// Generate a keypair so the control VM can ssh into cluster nodes.
	out, err := ControlVM.Command(`ssh-keygen -t rsa -q -f "$HOME/.ssh/id_rsa" -N ""`)
	Expect(err).ToNot(HaveOccurred(), out)
	fmt.Println(out)
})

View File

@@ -1,143 +0,0 @@
// nolint
package mos
import (
"bytes"
"fmt"
"image"
"image/png"
"os"
"strings"
"time"
"github.com/lmittmann/ppm"
"github.com/makiuchi-d/gozxing"
"github.com/makiuchi-d/gozxing/qrcode"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/spectrocloud/peg/matcher"
)
// QR-code install: boots the ISO, screenshots the console until a pairing
// QR code appears, drives `kairosctl register` with the screenshot, and
// verifies the machine reboots into the installed system.
var _ = Describe("kairos qr code install", Label("qrcode-install"), func() {
	var vm VM
	BeforeEach(func() {
		iso := os.Getenv("ISO")
		_, vm = startVM(iso)
		vm.EventuallyConnects(1200)
	})
	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			gatherLogs(vm)
		}
		vm.Destroy(nil)
	})
	It("installs to disk with custom config", func() {
		By("checking if is has default service active")
		if isFlavor(vm, "alpine") {
			out, _ := vm.Sudo("rc-status")
			Expect(out).Should(ContainSubstring("kairos"))
			Expect(out).Should(ContainSubstring("kairos-agent"))
		} else {
			// Eventually(func() string {
			// out, _ := machine.SSHCommand("sudo systemctl status kairos-agent")
			// return out
			// }, 30*time.Second, 10*time.Second).Should(ContainSubstring("no network token"))
			out, _ := vm.Sudo("systemctl status kairos")
			Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/kairos.service; enabled; vendor preset: disabled)"))
		}
		By("checking cmdline")
		// rd.cos.disable in the cmdline indicates the live (uninstalled) system.
		v, err := vm.Sudo("cat /proc/cmdline")
		Expect(err).ToNot(HaveOccurred(), v)
		Expect(v).To(ContainSubstring("rd.cos.disable"))
		var fileName string
		By("waiting until the qr code is shown")
		Eventually(func() string {
			fileName = getQRImage(vm)
			return fileName
		}, 10*time.Minute, 10*time.Second).ShouldNot(BeEmpty())
		By("find the correct device (qemu vs vbox)")
		device, err := vm.Sudo(`[[ -e /dev/sda ]] && echo "/dev/sda" || echo "/dev/vda"`)
		Expect(err).ToNot(HaveOccurred(), device)
		By("registering with a screenshot")
		out, err := kairosCtlCli(
			fmt.Sprintf("register --device %s --config %s %s",
				strings.TrimSpace(device),
				"./assets/config.yaml",
				fileName),
		)
		Expect(err).ToNot(HaveOccurred(), out)
		By("waiting until it reboots to installed system")
		Eventually(func() string {
			v, _ := vm.Sudo("kairos-agent state get boot")
			return strings.TrimSpace(v)
		}, 30*time.Minute, 10*time.Second).Should(ContainSubstring("active_boot"))
		// After install the live-mode flag must be gone from the cmdline.
		Eventually(func() string {
			v, _ := vm.Sudo("cat /proc/cmdline")
			return v
		}, 10*time.Minute, 10*time.Second).ShouldNot(ContainSubstring("rd.cos.disable"))
	})
})
// getQRImage returns the path to a screenshot with a QR code or empty
// if no QR code is found. On success the screenshot is re-encoded in place
// as PNG, because go-nodepair doesn't understand `ppm`.
// Relevant: https://github.com/mudler/go-nodepair/pull/1
func getQRImage(vm VM) string {
	image.RegisterFormat("ppm", "ppm", ppm.Decode, ppm.DecodeConfig)

	fileName, err := vm.Screenshot()
	if err != nil {
		os.RemoveAll(fileName)
	}
	Expect(err).ToNot(HaveOccurred())

	// open and decode image file
	file, err := os.Open(fileName)
	if err != nil {
		os.RemoveAll(fileName)
	}
	// FIX: this error was previously unchecked — a failed Open left file nil
	// and panicked inside image.Decode.
	Expect(err).ToNot(HaveOccurred())
	// FIX: the descriptor was previously never closed, leaking one fd per
	// polling attempt of the surrounding Eventually.
	defer file.Close()

	img, _, err := image.Decode(file)
	if err != nil {
		os.RemoveAll(fileName)
	}
	Expect(err).ToNot(HaveOccurred())

	// prepare BinaryBitmap
	bmp, err := gozxing.NewBinaryBitmapFromImage(img)
	if err != nil {
		os.RemoveAll(fileName)
	}
	Expect(err).ToNot(HaveOccurred())

	// decode image — "no QR code yet" is an expected outcome, not a failure.
	qrReader := qrcode.NewQRCodeReader()
	_, err = qrReader.Decode(bmp, nil)
	if err != nil {
		os.RemoveAll(fileName)
		return ""
	}

	// Encode to png because go-nodepair doesn't understand `ppm`
	// Relevant: https://github.com/mudler/go-nodepair/pull/1
	buf := new(bytes.Buffer)
	err = png.Encode(buf, img)
	Expect(err).ToNot(HaveOccurred())

	// Replace with png data
	err = os.WriteFile(fileName, buf.Bytes(), os.ModePerm)
	Expect(err).ToNot(HaveOccurred())
	return fileName
}

View File

@@ -1,57 +0,0 @@
// nolint
package mos
import (
"encoding/json"
"os"
"github.com/mudler/go-pluggable"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/spectrocloud/peg/matcher"
"golang.org/x/mod/semver"
)
// provider upgrade: asks the agent-provider for its available releases and
// verifies the list is non-empty, semver-ordered, and free of ".img" tags.
var _ = Describe("provider upgrade test", Label("provider-upgrade"), func() {
	var vm VM
	BeforeEach(func() {
		iso := os.Getenv("ISO")
		_, vm = startVM(iso)
		vm.EventuallyConnects(1200)
	})
	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			gatherLogs(vm)
		}
		vm.Destroy(nil)
	})
	Context("agent.available_releases event", func() {
		It("returns the available versions ordered, excluding '.img' tags", func() {
			resultStr, _ := vm.Sudo(`echo '{}' | /system/providers/agent-provider-kairos agent.available_releases`)
			var result pluggable.EventResponse
			err := json.Unmarshal([]byte(resultStr), &result)
			Expect(err).ToNot(HaveOccurred())
			Expect(result.Data).ToNot(BeEmpty())
			var versions []string
			// FIX: this Unmarshal error was previously ignored; a malformed
			// payload silently produced an empty slice and failed later with a
			// misleading message.
			err = json.Unmarshal([]byte(result.Data), &versions)
			Expect(err).ToNot(HaveOccurred())
			Expect(versions).ToNot(BeEmpty())
			// Sorting a copy and comparing proves the original order was
			// already sorted.
			sorted := make([]string, len(versions))
			copy(sorted, versions)
			semver.Sort(sorted)
			for _, t := range sorted {
				Expect(t).ToNot(ContainSubstring(".img"))
			}
			Expect(sorted).To(Equal(versions))
		})
	})
})

View File

@@ -1,268 +0,0 @@
package mos
import (
"context"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path"
"strconv"
"testing"
"github.com/google/uuid"
"github.com/kairos-io/kairos-sdk/utils"
process "github.com/mudler/go-processmanager"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/spectrocloud/peg/matcher"
"github.com/spectrocloud/peg/pkg/machine"
"github.com/spectrocloud/peg/pkg/machine/types"
)
// kubectl runs a kubectl command on the VM through the bundled k3s binary.
var kubectl = func(vm VM, s string) (string, error) {
	return vm.Sudo("k3s kubectl " + s)
}

// getVersionCmd is a shell snippet that prints the OS version:
// KAIROS_VERSION from /etc/os-release when set, otherwise VERSION.
var getVersionCmd = ". /etc/os-release; [ ! -z \"$KAIROS_VERSION\" ] && echo $KAIROS_VERSION || echo $VERSION"
// TestSuite is the `go test` entry point wiring Ginkgo's fail handler into
// the standard testing package and running the kairos specs.
func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "kairos Test Suite")
}
// isFlavor reports whether the VM's /etc/os-release contains an ID line
// matching the given flavor (e.g. "alpine").
func isFlavor(vm VM, flavor string) bool {
	out, err := vm.Sudo(fmt.Sprintf("cat /etc/os-release | grep ID=%s", flavor))
	if err != nil || out == "" {
		return false
	}
	return true
}
// detachAndReboot restarts the global Machine; VirtualBox machines first
// get their install CD detached so the VM boots from disk.
func detachAndReboot() {
	if vbox, ok := Machine.(*machine.VBox); ok {
		vbox.DetachCD()
		vbox.Restart()
		return
	}
	Reboot()
}
var sshPort string
func user() string {
user := os.Getenv("SSH_USER")
if user == "" {
user = "kairos"
}
return user
}
func pass() string {
pass := os.Getenv("SSH_PASS")
if pass == "" {
pass = "kairos"
}
return pass
}
// startVM boots a fresh test VM from the given ISO using peg. Engine and
// sizing come from the environment (USE_QEMU, KVM, MEMORY, CPUS,
// DRIVE_SIZE, DATASOURCE, MACHINE_SPICY, SSH_USER/SSH_PASS); serial output
// is logged to <stateDir>/serial.log. Returns the machine context and the
// VM handle; failures abort the current spec.
func startVM(iso string) (context.Context, VM) {
	var sshPort, spicePort int
	vmName := uuid.New().String()
	stateDir, err := os.MkdirTemp("", "stateDir-*")
	Expect(err).ToNot(HaveOccurred())
	fmt.Printf("Statedir: %s\n", stateDir)
	sshPort, err = getFreePort()
	Expect(err).ToNot(HaveOccurred())
	// Sizing defaults, overridable via env.
	memory := os.Getenv("MEMORY")
	if memory == "" {
		memory = "2000"
	}
	cpus := os.Getenv("CPUS")
	if cpus == "" {
		cpus = "1"
	}
	driveSize := os.Getenv("DRIVE_SIZE")
	if driveSize == "" {
		driveSize = "25000"
	}
	opts := []types.MachineOption{
		types.QEMUEngine,
		types.WithISO(iso),
		types.WithMemory(memory),
		types.WithDriveSize(driveSize),
		types.WithCPU(cpus),
		types.WithSSHPort(strconv.Itoa(sshPort)),
		types.WithID(vmName),
		types.WithSSHUser(user()),
		types.WithSSHPass(pass()),
		types.OnFailure(func(p *process.Process) {
			out, _ := os.ReadFile(p.StdoutPath())
			err, _ := os.ReadFile(p.StderrPath())
			status, _ := p.ExitCode()
			// We are explicitly killing the qemu process. We don't treat that as an error
			// but we just print the output just in case.
			fmt.Printf("\nVM Aborted: %s %s Exit status: %s\n", out, err, status)
		}),
		func(m *types.MachineConfig) error {
			// Mux the serial console into a log file so boot output survives.
			m.Args = append(m.Args,
				"-chardev", fmt.Sprintf("stdio,mux=on,id=char0,logfile=%s,signal=off", path.Join(stateDir, "serial.log")),
				"-serial", "chardev:char0",
				"-mon", "chardev=char0",
			)
			return nil
		},
		types.WithStateDir(stateDir),
		types.WithDataSource(os.Getenv("DATASOURCE")),
	}
	if os.Getenv("KVM") != "" {
		opts = append(opts, func(m *types.MachineConfig) error {
			m.Args = append(m.Args,
				"-enable-kvm",
			)
			return nil
		})
	}
	if os.Getenv("USE_QEMU") == "true" {
		opts = append(opts, types.QEMUEngine)
		// You can connect to it with "spicy" or other tool.
		// DISPLAY is already taken on Linux X sessions
		if os.Getenv("MACHINE_SPICY") != "" {
			spicePort, _ = getFreePort()
			for spicePort == sshPort { // avoid collision
				spicePort, _ = getFreePort()
			}
			display := fmt.Sprintf("-spice port=%d,addr=127.0.0.1,disable-ticketing=yes", spicePort)
			opts = append(opts, types.WithDisplay(display))
			cmd := exec.Command("spicy",
				"-h", "127.0.0.1",
				"-p", strconv.Itoa(spicePort))
			err = cmd.Start()
			Expect(err).ToNot(HaveOccurred())
		}
	} else {
		opts = append(opts, types.VBoxEngine)
	}
	m, err := machine.New(opts...)
	Expect(err).ToNot(HaveOccurred())
	vm := NewVM(m, stateDir)
	ctx, err := vm.Start(context.Background())
	if err != nil {
		// Surface the hypervisor's stdout/stderr to make startup failures
		// debuggable before the Expect below aborts the spec.
		so, e := os.ReadFile(path.Join(stateDir, "stdout"))
		if e != nil {
			fmt.Printf("Error reading stdout after process failing %s\n", e.Error())
		}
		se, e := os.ReadFile(path.Join(stateDir, "stderr"))
		if e != nil {
			fmt.Printf("Error reading stderr after process failing %s\n", e.Error())
		}
		fmt.Printf("An error occured.\nStderr = %+v\nStdout = %+v\n", string(se), string(so))
	}
	Expect(err).ToNot(HaveOccurred())
	return ctx, vm
}
func getFreePort() (port int, err error) {
var a *net.TCPAddr
if a, err = net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
var l *net.TCPListener
if l, err = net.ListenTCP("tcp", a); err == nil {
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
}
return
}
// gatherLogs collects diagnostics from a (typically failed) VM: kubernetes
// pod/event state, disk/mount/blkid info, dmesg and OEM config are staged
// under /run, then peg's GatherAllLogs fetches them together with the
// standard kairos/k3s service logs.
func gatherLogs(vm VM) {
	vm.Scp("assets/kubernetes_logs.sh", "/tmp/logs.sh", "0770")
	vm.Sudo("cat /oem/* > /run/oem.yaml")
	vm.Sudo("cat /etc/resolv.conf > /run/resolv.conf")
	vm.Sudo("k3s kubectl get pods -A -o json > /run/pods.json")
	vm.Sudo("k3s kubectl get events -A -o json > /run/events.json")
	vm.Sudo("cat /proc/cmdline > /run/cmdline")
	vm.Sudo("chmod 777 /run/events.json")
	vm.Sudo("sh /tmp/logs.sh > /run/kube_logs")
	vm.Sudo("df -h > /run/disk")
	vm.Sudo("mount > /run/mounts")
	vm.Sudo("blkid > /run/blkid")
	vm.Sudo("dmesg > /run/dmesg.log")
	// First slice: service units to dump; second: staged files to fetch.
	vm.GatherAllLogs(
		[]string{
			"edgevpn@kairos",
			"kairos-agent",
			"cos-setup-boot",
			"cos-setup-network",
			"cos-setup-initramfs",
			"cos-setup-reconcile",
			"kairos",
			"k3s",
			"k3s-agent",
		},
		[]string{
			"/var/log/edgevpn.log",
			"/var/log/kairos/agent.log",
			"/run/pods.json",
			"/run/disk",
			"/run/mounts",
			"/run/kube_logs",
			"/run/blkid",
			"/run/events.json",
			"/run/cmdline",
			"/run/oem.yaml",
			"/run/resolv.conf",
			"/run/dmesg.log",
		})
}
// download fetches a tarball from URL s into a temporary file and extracts
// it into the current working directory, failing the spec on any error.
func download(s string) {
	f2, err := ioutil.TempFile("", "fff")
	Expect(err).ToNot(HaveOccurred())
	defer os.RemoveAll(f2.Name())
	// FIX: the temp file descriptor was previously never closed (leaked).
	// Deferred after RemoveAll so it runs first (LIFO).
	defer f2.Close()

	resp, err := http.Get(s)
	Expect(err).ToNot(HaveOccurred())
	defer resp.Body.Close()

	_, err = io.Copy(f2, resp.Body)
	Expect(err).ToNot(HaveOccurred())

	// os.File writes are unbuffered, so tar can read the file before Close.
	out, err := utils.SH("tar xvf " + f2.Name())
	fmt.Println(out)
	Expect(err).ToNot(HaveOccurred(), out)
}
// kairosCli can be used to issue commands to the kairos-provider cli as if
// it was compiled and put in the PATH. This is running the CLI using `go run`
// to ensure we are running the same code that is being tested (and not some
// previously compiled binary).
// This makes the tests self-contained so that they don't rely on previous steps
// to have been run.
// NOTE(review): assumes the tests run from a directory where ../main.go is
// the CLI entry point — confirm against the suite's working directory.
func kairosCli(cmd string) (string, error) {
	// Ignore "go: downloading <package_here>" output
	_, err := utils.SH("go mod tidy")
	Expect(err).ToNot(HaveOccurred())
	// Now run the actual command to get the output
	return utils.SH(fmt.Sprintf("go run ../main.go -- %s", cmd))
}
// kairosCtlCli runs the kairosctl CLI via `go run`, mirroring kairosCli but
// targeting ../cli/kairosctl/main.go.
func kairosCtlCli(cmd string) (string, error) {
	return utils.SH(fmt.Sprintf("go run ../cli/kairosctl/main.go -- %s", cmd))
}

View File

@@ -1,152 +0,0 @@
// nolint
package mos
import (
"fmt"
"os"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/spectrocloud/peg/matcher"
)
// k3s upgrade test: installs kairos to disk, verifies base services and log
// rotation, waits for k3s to be healthy, applies the system-upgrade-
// controller Plan from assets/suc.yaml, and waits for the upgrade job.
var _ = Describe("k3s upgrade test", Label("upgrade-k8s"), func() {
	var vm VM
	BeforeEach(func() {
		iso := os.Getenv("ISO")
		_, vm = startVM(iso)
		vm.EventuallyConnects(1200)
	})
	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			gatherLogs(vm)
		}
		vm.Destroy(nil)
	})
	It("installs to disk with custom config", func() {
		By("checking if it has default service active")
		if isFlavor(vm, "alpine") {
			out, _ := vm.Sudo("rc-status")
			Expect(out).Should(ContainSubstring("kairos"))
			Expect(out).Should(ContainSubstring("kairos-agent"))
			out, _ = vm.Sudo("ps aux")
			Expect(out).Should(ContainSubstring("/usr/sbin/crond"))
		} else {
			out, _ := vm.Sudo("systemctl status kairos")
			Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/kairos.service; enabled; vendor preset: disabled)"))
			out, _ = vm.Sudo("systemctl status logrotate.timer")
			Expect(out).Should(ContainSubstring("active (waiting)"))
		}
		By("copy the config")
		err := vm.Scp("assets/single.yaml", "/tmp/config.yaml", "0770")
		Expect(err).ToNot(HaveOccurred())
		By("find the correct device (qemu vs vbox)")
		device, err := vm.Sudo(`[[ -e /dev/sda ]] && echo "/dev/sda" || echo "/dev/vda"`)
		Expect(err).ToNot(HaveOccurred(), device)
		By("installing")
		cmd := fmt.Sprintf("kairos-agent manual-install --device %s /tmp/config.yaml", strings.TrimSpace(device))
		out, err := vm.Sudo(cmd)
		Expect(err).ToNot(HaveOccurred(), out)
		Expect(out).Should(ContainSubstring("Running after-install hook"))
		// Flush to disk before rebooting out of the live system.
		out, err = vm.Sudo("sync")
		Expect(err).ToNot(HaveOccurred(), out)
		By("rebooting after install")
		vm.Reboot()
		By("checking default services are on after first boot")
		if isFlavor(vm, "alpine") {
			Eventually(func() string {
				out, _ := vm.Sudo("rc-status")
				return out
			}, 30*time.Second, 10*time.Second).Should(And(
				ContainSubstring("kairos"),
				ContainSubstring("kairos-agent")))
		} else {
			Eventually(func() string {
				out, _ := vm.Sudo("systemctl status kairos-agent")
				return out
			}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
				"loaded (/etc/systemd/system/kairos-agent.service; enabled; vendor preset: disabled)"))
			Eventually(func() string {
				out, _ := vm.Sudo("systemctl status systemd-timesyncd")
				return out
			}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
				"loaded (/usr/lib/systemd/system/systemd-timesyncd.service; enabled; vendor preset: disabled)"))
		}
		By("checking if kairos-agent has started")
		Eventually(func() string {
			var out string
			if isFlavor(vm, "alpine") {
				out, _ = vm.Sudo("rc-service kairos-agent status")
			} else {
				out, _ = vm.Sudo("systemctl status kairos-agent")
			}
			return out
		}, 900*time.Second, 10*time.Second).Should(Or(ContainSubstring("One time bootstrap starting"), ContainSubstring("status: started")))
		By("Checking agent provider correct start")
		Eventually(func() string {
			out, _ := vm.Sudo("cat /var/log/kairos/agent-provider.log")
			return out
		}, 900*time.Second, 10*time.Second).Should(Or(ContainSubstring("One time bootstrap starting"), ContainSubstring("Sentinel exists")))
		By("Checking k3s is pointing to https")
		Eventually(func() string {
			out, _ := vm.Sudo("cat /etc/rancher/k3s/k3s.yaml")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("https:"))
		By("checking if logs are rotated")
		out, err = vm.Sudo("logrotate -vf /etc/logrotate.d/kairos")
		Expect(err).ToNot(HaveOccurred())
		Expect(out).To(ContainSubstring("log needs rotating"))
		_, err = vm.Sudo("ls /var/log/kairos/agent-provider.log.1.gz")
		Expect(err).ToNot(HaveOccurred())
		By("wait system-upgrade-controller")
		Eventually(func() string {
			out, _ := kubectl(vm, "get pods -A")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("system-upgrade-controller"))
		By("wait for all containers to be in running state")
		Eventually(func() string {
			out, _ := kubectl(vm, "get pods -A")
			fmt.Printf("out = %+v\n", out)
			return out
		}, 900*time.Second, 10*time.Second).ShouldNot(And(ContainSubstring("Pending"), ContainSubstring("ContainerCreating")))
		By("applying upgrade plan")
		err = vm.Scp("assets/suc.yaml", "./suc.yaml", "0770")
		Expect(err).ToNot(HaveOccurred())
		Eventually(func() string {
			out, _ := kubectl(vm, "apply -f suc.yaml")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("unchanged"))
		Eventually(func() string {
			out, _ = kubectl(vm, "get pods -A")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("apply-os-upgrade-on-"), out)
		// Poll pods (to keep the cluster active in logs) while waiting for a
		// version to be reported post-upgrade.
		Eventually(func() string {
			out, _ = kubectl(vm, "get pods -A")
			version, _ := vm.Sudo(getVersionCmd)
			return version
		}, 30*time.Minute, 10*time.Second).Should(ContainSubstring("v"), out)
	})
})

View File

@@ -1,187 +0,0 @@
// nolint
package mos
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/spectrocloud/peg/matcher"
)
// sucYAML renders a system-upgrade-controller Plan manifest that upgrades
// every node (matched by hostname) to the given image and version via the
// /usr/sbin/suc-upgrade script baked into kairos images.
// NOTE(review): leading indentation inside this manifest template may have
// been lost in extraction — verify the literal against git history.
func sucYAML(image, version string) string {
	return `
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
name: os-upgrade
namespace: system-upgrade
labels:
k3s-upgrade: server
spec:
concurrency: 1
version: "` + version + `"
nodeSelector:
matchExpressions:
- {key: kubernetes.io/hostname, operator: Exists}
serviceAccountName: system-upgrade
cordon: false
upgrade:
image: "` + image + `"
command:
- "/usr/sbin/suc-upgrade"
`
}
// k3s upgrade from k8s: installs kairos, waits for k3s, then applies a
// generated Plan pointing at CONTAINER_IMAGE and waits until the reported
// OS version changes.
var _ = Describe("k3s upgrade test from k8s", Label("upgrade-latest-with-kubernetes"), func() {
	var containerImage string
	var vm VM
	BeforeEach(func() {
		containerImage = os.Getenv("CONTAINER_IMAGE")
		iso := os.Getenv("ISO")
		_, vm = startVM(iso)
		vm.EventuallyConnects(1200)
	})
	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			gatherLogs(vm)
		}
		vm.Destroy(nil)
	})
	It("installs to disk with custom config", func() {
		By("checking if it has default service active")
		if containerImage == "" {
			Fail("CONTAINER_IMAGE needs to be set")
		}
		if isFlavor(vm, "alpine") {
			out, _ := vm.Sudo("rc-status")
			Expect(out).Should(ContainSubstring("kairos"))
			Expect(out).Should(ContainSubstring("kairos-agent"))
		} else {
			// Eventually(func() string {
			// out, _ := vm.Sudo("sudo systemctl status kairos-agent")
			// return out
			// }, 30*time.Second, 10*time.Second).Should(ContainSubstring("no network token"))
			out, _ := vm.Sudo("systemctl status kairos")
			Expect(out).Should(ContainSubstring("loaded (/etc/systemd/system/kairos.service; enabled; vendor preset: disabled)"))
		}
		By("copy the config")
		err := vm.Scp("assets/single.yaml", "/tmp/config.yaml", "0770")
		Expect(err).ToNot(HaveOccurred())
		By("find the correct device (qemu vs vbox)")
		device, err := vm.Sudo(`[[ -e /dev/sda ]] && echo "/dev/sda" || echo "/dev/vda"`)
		Expect(err).ToNot(HaveOccurred(), device)
		By("installing")
		cmd := fmt.Sprintf("kairos-agent manual-install --device %s /tmp/config.yaml", strings.TrimSpace(device))
		out, err := vm.Sudo(cmd)
		Expect(err).ToNot(HaveOccurred(), out)
		Expect(out).Should(ContainSubstring("Running after-install hook"))
		out, err = vm.Sudo("sync")
		Expect(err).ToNot(HaveOccurred(), out)
		By("rebooting after install")
		vm.Reboot()
		By("checking default services are on after first boot")
		if isFlavor(vm, "alpine") {
			Eventually(func() string {
				out, _ := vm.Sudo("rc-status")
				return out
			}, 30*time.Second, 10*time.Second).Should(And(
				ContainSubstring("kairos"),
				ContainSubstring("kairos-agent")))
		} else {
			Eventually(func() string {
				out, _ := vm.Sudo("systemctl status kairos-agent")
				return out
			}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
				"loaded (/etc/systemd/system/kairos-agent.service; enabled; vendor preset: disabled)"))
			Eventually(func() string {
				out, _ := vm.Sudo("systemctl status systemd-timesyncd")
				return out
			}, 30*time.Second, 10*time.Second).Should(ContainSubstring(
				"loaded (/usr/lib/systemd/system/systemd-timesyncd.service; enabled; vendor preset: disabled)"))
		}
		Eventually(func() string {
			var out string
			if isFlavor(vm, "alpine") {
				out, _ = vm.Sudo("rc-service kairos-agent status")
			} else {
				out, _ = vm.Sudo("systemctl status kairos-agent")
			}
			return out
		}, 900*time.Second, 10*time.Second).Should(Or(ContainSubstring("One time bootstrap starting"), ContainSubstring("status: started")))
		By("checking kubeconfig")
		Eventually(func() string {
			out, _ := vm.Sudo("cat /etc/rancher/k3s/k3s.yaml")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("https:"))
		By("checking current version")
		currentVersion, err := vm.Sudo(getVersionCmd)
		Expect(err).ToNot(HaveOccurred())
		Expect(currentVersion).To(ContainSubstring("v"))
		By("wait system-upgrade-controller")
		// FIX: this Eventually previously had no timeout/poll arguments and no
		// .Should(...) attached — a bare `Eventually(f)` asserts nothing and
		// never polls. Mirror the sibling suite and actually wait for the
		// controller pod to appear.
		Eventually(func() string {
			out, _ := kubectl(vm, "get pods -A")
			fmt.Printf("out = %+v\n", out)
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("system-upgrade-controller"))
		By("wait for all containers to be in running state")
		Eventually(func() string {
			out, _ := kubectl(vm, "get pods -A")
			fmt.Printf("out = %+v\n", out)
			return out
		}, 900*time.Second, 10*time.Second).ShouldNot(And(ContainSubstring("Pending"), ContainSubstring("ContainerCreating")))
		By("triggering an upgrade")
		suc := sucYAML(strings.ReplaceAll(containerImage, ":8h", ""), "8h")
		err = ioutil.WriteFile("assets/generated.yaml", []byte(suc), os.ModePerm)
		Expect(err).ToNot(HaveOccurred())
		err = vm.Scp("assets/generated.yaml", "./suc.yaml", "0770")
		Expect(err).ToNot(HaveOccurred())
		Eventually(func() string {
			out, _ = kubectl(vm, "apply -f suc.yaml")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("created"), out)
		Eventually(func() string {
			out, _ = kubectl(vm, "get pods -A")
			return out
		}, 900*time.Second, 10*time.Second).Should(ContainSubstring("apply-os-upgrade-on-"), out)
		By("checking upgraded version")
		Eventually(func() string {
			out, _ = kubectl(vm, "get pods -A")
			version, err := vm.Sudo(getVersionCmd)
			if err != nil || !strings.Contains(version, "v") {
				// If we met error, keep going with the Eventually
				return currentVersion
			}
			return version
		}, 50*time.Minute, 10*time.Second).ShouldNot(Equal(currentVersion), out)
	})
})