Merge pull request #9637 from ldoktor/selective-ci

CI: Select jobs by touched code
Greg Kurz 2024-10-04 11:29:05 +02:00 committed by GitHub
commit 77c5db6267
9 changed files with 544 additions and 2 deletions


@@ -19,12 +19,21 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  kata-containers-ci-on-push:
+  skipper:
     if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
+    uses: ./.github/workflows/gatekeeper-skipper.yaml
+    with:
+      commit-hash: ${{ github.event.pull_request.head.sha }}
+      target-branch: ${{ github.event.pull_request.base.ref }}
+
+  kata-containers-ci-on-push:
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_build != 'yes' }}
     uses: ./.github/workflows/ci.yaml
     with:
       commit-hash: ${{ github.event.pull_request.head.sha }}
       pr-number: ${{ github.event.pull_request.number }}
       tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
       target-branch: ${{ github.event.pull_request.base.ref }}
+      skip-test: ${{ needs.skipper.outputs.skip_test }}
     secrets: inherit

.github/workflows/ci.yaml

@@ -15,6 +15,10 @@ on:
         required: false
         type: string
         default: ""
+      skip-test:
+        required: false
+        type: string
+        default: no
 
 jobs:
   build-kata-static-tarball-amd64:
@@ -132,6 +136,7 @@ jobs:
           file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
 
   run-kata-monitor-tests:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/run-kata-monitor-tests.yaml
     with:
@@ -140,6 +145,7 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
 
   run-k8s-tests-on-aks:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: publish-kata-deploy-payload-amd64
     uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
     with:
@@ -153,6 +159,7 @@ jobs:
     secrets: inherit
 
   run-k8s-tests-on-amd64:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: publish-kata-deploy-payload-amd64
     uses: ./.github/workflows/run-k8s-tests-on-amd64.yaml
     with:
@@ -165,6 +172,7 @@ jobs:
     secrets: inherit
 
   run-kata-coco-tests:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
     uses: ./.github/workflows/run-kata-coco-tests.yaml
     with:
@@ -177,6 +185,7 @@ jobs:
     secrets: inherit
 
   run-k8s-tests-on-zvsi:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: [publish-kata-deploy-payload-s390x, build-and-publish-tee-confidential-unencrypted-image]
     uses: ./.github/workflows/run-k8s-tests-on-zvsi.yaml
     with:
@@ -189,6 +198,7 @@ jobs:
     secrets: inherit
 
   run-k8s-tests-on-ppc64le:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: publish-kata-deploy-payload-ppc64le
     uses: ./.github/workflows/run-k8s-tests-on-ppc64le.yaml
     with:
@@ -200,6 +210,7 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
 
   run-metrics-tests:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/run-metrics.yaml
     with:
@@ -208,6 +219,7 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
 
   run-basic-amd64-tests:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/basic-ci-amd64.yaml
     with:
@@ -216,6 +228,7 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
 
   run-cri-containerd-tests-s390x:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-s390x
     uses: ./.github/workflows/run-cri-containerd-tests-s390x.yaml
     with:
@@ -224,6 +237,7 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
 
   run-cri-containerd-tests-ppc64le:
+    if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-ppc64le
     uses: ./.github/workflows/run-cri-containerd-tests-ppc64le.yaml
     with:

.github/workflows/gatekeeper-skipper.yaml (new file)

@@ -0,0 +1,52 @@
name: Skipper
# This workflow sets various "skip_*" output values that can be used to
# determine what workflows/jobs are expected to be executed. Sample usage:
#
#   skipper:
#     uses: ./.github/workflows/gatekeeper-skipper.yaml
#     with:
#       commit-hash: ${{ github.event.pull_request.head.sha }}
#       target-branch: ${{ github.event.pull_request.base.ref }}
#
#   your-workflow:
#     needs: skipper
#     if: ${{ needs.skipper.outputs.skip_build != 'yes' }}

on:
  workflow_call:
    inputs:
      commit-hash:
        required: true
        type: string
      target-branch:
        required: false
        type: string
        default: ""
    outputs:
      skip_build:
        value: ${{ jobs.skipper.outputs.skip_build }}
      skip_test:
        value: ${{ jobs.skipper.outputs.skip_test }}
      skip_static:
        value: ${{ jobs.skipper.outputs.skip_static }}

jobs:
  skipper:
    runs-on: ubuntu-latest
    outputs:
      skip_build: ${{ steps.skipper.outputs.skip_build }}
      skip_test: ${{ steps.skipper.outputs.skip_test }}
      skip_static: ${{ steps.skipper.outputs.skip_static }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0
      - id: skipper
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}
        run: |
          python3 tools/testing/gatekeeper/skips.py | tee -a "$GITHUB_OUTPUT"
        shell: /usr/bin/bash -x {0}
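The skipper step pipes skips.py through tee into "$GITHUB_OUTPUT", which GitHub Actions reads back as key=value lines that become the step's (and hence the job's and workflow's) outputs. A minimal sketch of that contract, assuming the three set-of-tests keys shipped in required-tests.yaml; the parse_github_output() helper and the sample text are illustrations, not part of this PR:

# Illustration only: skips.py emits one "skip_<set-of-tests>=yes|no" line per
# mapping entry, and "tee -a" appends them to $GITHUB_OUTPUT.
def parse_github_output(text):
    """Turn key=value lines (the $GITHUB_OUTPUT format) into a dict."""
    outputs = {}
    for line in text.splitlines():
        if "=" in line:
            key, value = line.split("=", 1)
            outputs[key.strip()] = value.strip()
    return outputs

# Hypothetical output for a PR that only touches ci/openshift-ci/ (mapped to an
# empty set-of-tests list in required-tests.yaml, so everything can be skipped):
sample = "skip_build=yes\nskip_test=yes\nskip_static=yes"
assert parse_github_output(sample)["skip_build"] == "yes"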

.github/workflows/gatekeeper.yaml (new file)

@@ -0,0 +1,44 @@
name: Gatekeeper
# Gatekeeper uses the "skips.py" to determine which job names/regexps are
# required for given PR and waits for them to either complete or fail
# reporting the status.
on:
  pull_request_target:
    types:
      - opened
      - synchronize
      - reopened
      - labeled

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  gatekeeper:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - id: gatekeeper
        env:
          TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COMMIT_HASH: ${{ github.event.pull_request.head.sha }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          #!/usr/bin/env bash -x
          mapfile -t lines < <(python3 tools/testing/gatekeeper/skips.py -t)
          export REQUIRED_JOBS="${lines[0]}"
          export REQUIRED_REGEXPS="${lines[1]}"
          export REQUIRED_LABELS="${lines[2]}"
          echo "REQUIRED_JOBS: $REQUIRED_JOBS"
          echo "REQUIRED_REGEXPS: $REQUIRED_REGEXPS"
          echo "REQUIRED_LABELS: $REQUIRED_LABELS"
          python3 tools/testing/gatekeeper/jobs.py
          exit $?
        shell: /usr/bin/bash -x {0}
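The mapfile line above captures the three-line output of "skips.py -t" and hands it to jobs.py through the environment. A rough Python equivalent of that run step, shown only to spell out the contract (it assumes the repository root as the working directory):

# Rough equivalent of the gatekeeper run step, for illustration only.
import os
import subprocess

lines = subprocess.check_output(
    ["python3", "tools/testing/gatekeeper/skips.py", "-t"], text=True
).splitlines()
os.environ["REQUIRED_JOBS"] = lines[0]     # "$workflow / $job" names, ';'-separated
os.environ["REQUIRED_REGEXPS"] = lines[1]  # regexps matching required job names
os.environ["REQUIRED_LABELS"] = lines[2]   # PR labels that must be present
subprocess.run(["python3", "tools/testing/gatekeeper/jobs.py"], check=True)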


@@ -12,8 +12,16 @@ concurrency:
 name: Static checks self-hosted
 
 jobs:
-  build-checks:
+  skipper:
     if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
+    uses: ./.github/workflows/gatekeeper-skipper.yaml
+    with:
+      commit-hash: ${{ github.event.pull_request.head.sha }}
+      target-branch: ${{ github.event.pull_request.base.ref }}
+
+  build-checks:
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
     strategy:
       fail-fast: false
       matrix:


@@ -12,7 +12,15 @@ concurrency:
 name: Static checks
 
 jobs:
+  skipper:
+    uses: ./.github/workflows/gatekeeper-skipper.yaml
+    with:
+      commit-hash: ${{ github.event.pull_request.head.sha }}
+      target-branch: ${{ github.event.pull_request.base.ref }}
+
   check-kernel-config-version:
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
     runs-on: ubuntu-22.04
     steps:
       - name: Checkout the code
@@ -35,12 +43,16 @@ jobs:
           fi
 
   build-checks:
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
     uses: ./.github/workflows/build-checks.yaml
     with:
       instance: ubuntu-22.04
 
   build-checks-depending-on-kvm:
     runs-on: ubuntu-22.04
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
     strategy:
       fail-fast: false
       matrix:
@@ -78,6 +90,8 @@ jobs:
   static-checks:
     runs-on: ubuntu-22.04
+    needs: skipper
+    if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
     strategy:
       fail-fast: false
       matrix:

tools/testing/gatekeeper/jobs.py (new file)

@@ -0,0 +1,250 @@
#!/usr/bin/env python3
#
# Copyright (c) 2024 Red Hat Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
Keeps checking the current PR until all required jobs pass

Env variables:
* REQUIRED_JOBS: comma separated list of required jobs (in form of
  "$workflow / $job")
* REQUIRED_REGEXPS: comma separated list of regexps for required jobs
* COMMIT_HASH: Full commit hash we want to be watching
* GITHUB_REPOSITORY: Github repository (user/repo)

Sample execution (GH token can be excluded):
GITHUB_TOKEN="..." REQUIRED_JOBS="skipper / skipper"
REQUIRED_REGEXPS=".*" REQUIRED_LABELS="ok-to-test;bar"
COMMIT_HASH=b8382cea886ad9a8f77d237bcfc0eba0c98775dd
GITHUB_REPOSITORY=kata-containers/kata-containers
PR_NUMBER=123 python3 jobs.py
"""

import os
import re
import sys
import time

import requests


PASS = 0
FAIL = 1
RUNNING = 127

_GH_HEADERS = {"Accept": "application/vnd.github.v3+json"}
if os.environ.get("GITHUB_TOKEN"):
    _GH_HEADERS["Authorization"] = f"token {os.environ['GITHUB_TOKEN']}"
_GH_API_URL = f"https://api.github.com/repos/{os.environ['GITHUB_REPOSITORY']}"
_GH_RUNS_URL = f"{_GH_API_URL}/actions/runs"


class Checker:
    """Object to keep watching required GH action workflows"""

    def __init__(self):
        self.latest_commit_sha = os.getenv("COMMIT_HASH")
        self.pr_number = os.getenv("PR_NUMBER")
        required_labels = os.getenv("REQUIRED_LABELS")
        if required_labels:
            self.required_labels = set(required_labels.split(";"))
        else:
            self.required_labels = []
        required_jobs = os.getenv("REQUIRED_JOBS")
        if required_jobs:
            required_jobs = required_jobs.split(";")
        else:
            required_jobs = []
        required_regexps = os.getenv("REQUIRED_REGEXPS")
        self.required_regexps = []
        # TODO: Add way to specify minimum amount of tests
        # (eg. via \d+: prefix) and check it in status
        if required_regexps:
            for regexp in required_regexps.split(";"):
                self.required_regexps.append(re.compile(regexp))
        if not required_jobs and not self.required_regexps:
            raise RuntimeError("No REQUIRED_JOBS or REQUIRED_REGEXPS defined")
        # Set all required jobs as EXPECTED to enforce waiting for them
        self.results = {job: {"status": "EXPECTED", "run_id": -1}
                        for job in required_jobs}

    def record(self, workflow, job):
        """
        Records a job run
        """
        job_name = f"{workflow} / {job['name']}"
        if job_name not in self.results:
            for re_job in self.required_regexps:
                # Required job via regexp
                if re_job.match(job_name):
                    break
            else:
                # Not a required job
                return
        # TODO: Check if multiple re-runs use the same "run_id". If so use
        # job['run_attempt'] in case of matching "run_id".
        elif job['run_id'] <= self.results[job_name]['run_id']:
            # Newer results already stored
            print(f"older {job_name} - {job['status']} {job['conclusion']} "
                  f"{job['id']}", file=sys.stderr)
            return
        print(f"{job_name} - {job['status']} {job['conclusion']} {job['id']}",
              file=sys.stderr)
        self.results[job_name] = job

    @staticmethod
    def _job_status(job):
        """Map job status to our status"""
        if job["status"] != "completed":
            return RUNNING
        if job["conclusion"] != "success":
            return job['conclusion']
        return PASS

    def status(self):
        """
        :returns: 0 - all tests passing; 127 - no failures but some
            tests in progress; 1 - any failure
        """
        running = False
        if not self.results:
            # No results reported so far
            return FAIL
        for job in self.results.values():
            status = self._job_status(job)
            if status == RUNNING:
                running |= True
            elif status != PASS:
                # Status not passed
                return FAIL
        if running:
            return RUNNING
        return PASS

    def __str__(self):
        """Sumarize the current status"""
        good = []
        bad = []
        warn = []
        for name, job in self.results.items():
            status = self._job_status(job)
            if status == RUNNING:
                warn.append(f"WARN: {name} - Still running")
            elif status == PASS:
                good.append(f"PASS: {name} - success")
            else:
                bad.append(f"FAIL: {name} - Not passed - {status}")
        out = '\n'.join(sorted(good) + sorted(warn) + sorted(bad))
        stat = self.status()
        if stat == RUNNING:
            status = "Some jobs are still running."
        elif stat == PASS:
            status = "All required jobs passed"
        elif not self.results:
            status = ("No required jobs for regexps: " +
                      ";".join([_.pattern for _ in self.required_regexps]))
        else:
            status = "Not all required jobs passed!"
        return f"{out}\n\n{status}"

    def get_jobs_for_workflow_run(self, run_id):
        """Get jobs from a workflow id"""
        total_count = -1
        jobs = []
        page = 1
        while True:
            url = f"{_GH_RUNS_URL}/{run_id}/jobs?per_page=100&page={page}"
            print(url, file=sys.stderr)
            response = requests.get(url, headers=_GH_HEADERS, timeout=60)
            response.raise_for_status()
            output = response.json()
            jobs.extend(output["jobs"])
            total_count = max(total_count, output["total_count"])
            if len(jobs) >= total_count:
                break
            page += 1
        return jobs

    def check_workflow_runs_status(self):
        """
        Checks if all required jobs passed

        :returns: 0 - all passing; 1 - any failure; 127 some jobs running
        """
        # TODO: Check if we need pagination here as well
        print(_GH_RUNS_URL, file=sys.stderr)
        response = requests.get(
            _GH_RUNS_URL,
            params={"head_sha": self.latest_commit_sha},
            headers=_GH_HEADERS,
            timeout=60
        )
        response.raise_for_status()
        workflow_runs = response.json()["workflow_runs"]
        for i, run in enumerate(workflow_runs):
            jobs = self.get_jobs_for_workflow_run(run["id"])
            for job in jobs:
                self.record(run["name"], job)
        print(self)
        return self.status()

    def wait_for_required_tests(self):
        """
        Wait for all required tests to pass or failfast

        :return: 0 - all passing; 1 - any failure
        """
        while True:
            ret = self.check_workflow_runs_status()
            if ret == RUNNING:
                running_jobs = len([name
                                    for name, job in self.results.items()
                                    if self._job_status(job) == RUNNING])
                print(f"{running_jobs} jobs are still running...")
                time.sleep(180)
                continue
            return ret

    def check_required_labels(self):
        """
        Check if all expected labels are present

        :return: True on success, False on failure
        """
        if not self.required_labels:
            # No required labels, exit
            return True
        if not self.pr_number:
            print("The PR_NUMBER not specified, skipping the "
                  f"required-labels-check ({self.required_labels})")
            return True
        response = requests.get(
            f"{_GH_API_URL}/issues/{self.pr_number}",
            headers=_GH_HEADERS,
            timeout=60
        )
        response.raise_for_status()
        labels = set(_["name"] for _ in response.json()["labels"])
        if self.required_labels.issubset(labels):
            return True
        print(f"To run all required tests the PR{self.pr_number} must have "
              f"{', '.join(self.required_labels.difference(labels))} labels "
              "set.")
        return False

    def run(self):
        """
        Keep checking the PR until all required jobs finish

        :returns: 0 on success; 1 on check failure; 2 when PR missing labels
        """
        print(f"Gatekeeper for project={os.environ['GITHUB_REPOSITORY']} and "
              f"SHA={self.latest_commit_sha} PR={self.pr_number}")
        if not self.check_required_labels():
            sys.exit(2)
        sys.exit(self.wait_for_required_tests())


if __name__ == "__main__":
    Checker().run()
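As a quick illustration of the status() contract described in the docstrings above, the following sketch fakes the job dicts normally fetched from the GitHub API. It is not part of the PR and assumes jobs.py is importable from the current directory with the requests dependency installed:

# Sketch only: exercise Checker.status() with hand-made job results.
import os

os.environ.setdefault("GITHUB_REPOSITORY", "kata-containers/kata-containers")
from jobs import Checker, RUNNING  # noqa: E402  (the module reads env at import time)

checker = Checker.__new__(Checker)  # bypass __init__; status() only needs .results
checker.results = {
    "Kata Containers CI / build": {"status": "completed", "conclusion": "success"},
    "Static checks / static-checks": {"status": "in_progress", "conclusion": None},
}
assert checker.status() == RUNNING  # nothing failed, but one job is still running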

tools/testing/gatekeeper/required-tests.yaml (new file)

@@ -0,0 +1,42 @@
required_tests:
  # Always required tests
  - "Commit Message Check / Commit Message Check"
required_regexps:
  # Always required regexps
paths:
  # Mapping of path (python) regexps to set-of-tests (sort by order of importance)
  # CI
  - "^ci/openshift-ci/": []
  - "^\\.github/workflows/": []
  # TODO: Expand filters
  # Documentation
  #- "\\.rst$": ["build"]
  #- "\\.md$": ["build"]
  # Sources
  #- "^src/": ["static", "build", "test"]
mapping:
  # Mapping of set-of-tests to required test names and/or test name regexps
  # TODO: Modify this according to actual required tests
  test:
    # Checks the basic functional tests work
    regexps: "Kata Containers CI / .*run-basic-amd64-tests.*|Kata Containers CI / .*run-metrics-tests.*"
    names:
      - Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-on-ppc64le / run-k8s-tests (qemu, kubeadm)
      - Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-on-aks / run-k8s-tests (ubuntu, qemu, small)
      - Kata Containers CI / kata-containers-ci-on-push / run-k8s-tests-with-crio-on-garm / run-k8s-tests (qemu, k0s, garm-ubuntu-2204)
    required-labels:
      - ok-to-test
  # TODO: Add support for "depends" to automatically add dependant set-of-tests
  # (eg. "build" is required for "test")
  build:
    # Checks that the kata-containers static tarball is created
    regexps: "Kata Containers CI / .*build-kata-static-tarball.*"
    required-labels:
      - ok-to-test
  static:
    # Checks that static checks are passing
    regexps: "Static checks.*"
    required-labels:
      - ok-to-test
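To make the paths/mapping relationship concrete, here is a simplified, per-file view of the lookup that skips.py (below) performs against this file. In the real script the result is unioned over all changed files; the sets_for() helper and the example file names are only for illustration:

# Simplified sketch of the path -> set-of-tests lookup driven by the YAML above.
import re

PATHS = {
    re.compile(r"^ci/openshift-ci/"): [],
    re.compile(r"^\.github/workflows/"): [],
}
ALL_SETS = {"test", "build", "static"}  # the mapping keys above

def sets_for(changed_file):
    """Return the set-of-tests a single changed file asks for."""
    for regexp, features in PATHS.items():
        if regexp.search(changed_file):
            return set(features)  # first matching path entry wins
    return ALL_SETS               # unlisted file: be safe and require everything

assert sets_for(".github/workflows/ci.yaml") == set()
assert sets_for("src/runtime/main.go") == ALL_SETS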

tools/testing/gatekeeper/skips.py (new file)

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
#
# Copyright (c) 2024 Red Hat Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
Gets changes of the current git to env variable TARGET_BRANCH
and reports feature skips in form of "skip_$feature=yes|no"
or list of required tests (based on argv[1])
"""

from collections import OrderedDict
import os
import re
import subprocess
import sys

import yaml


class Checks:
    def __init__(self):
        config_path = os.path.join(os.path.dirname(__file__), "required-tests.yaml")
        with open(config_path, "r", encoding="utf8") as config_fd:
            config = yaml.load(config_fd, Loader=yaml.SafeLoader)
        if config.get('required_tests'):
            self.required_tests = config['required_tests']
        else:
            self.required_tests = []
        if config.get('required_regexps'):
            self.required_regexps = config['required_regexps']
        else:
            self.required_regexps = []
        if config.get('paths'):
            self.paths = OrderedDict((re.compile(key), value)
                                     for items in config['paths']
                                     for key, value in items.items())
        if config.get('mapping'):
            self.mapping = config['mapping']
        else:
            self.mapping = {}
        self.all_set_of_tests = set(self.mapping.keys())

    def run(self, tests, target_branch):
        """
        Find the required features/tests

        :param: tests: report required tests+regexps (bool)
        :param: target_branch: branch/commit to compare to
        """
        enabled_features = self.get_features(target_branch)
        if not tests:
            for feature in self.all_set_of_tests:
                # Print all features status in "$key=$value" format to allow
                # usage with $GITHUB_OUTPUT
                print(f"skip_{feature}=" +
                      ('no' if feature in enabled_features else 'yes'))
            return 0
        required_tests = set(self.required_tests)
        required_regexps = set(self.required_regexps)
        required_labels = set()
        for feature in enabled_features:
            values = self.mapping.get(feature, {})
            if values.get("names"):
                required_tests.update(values["names"])
            if values.get("regexps"):
                required_regexps.add(values["regexps"])
            if values.get("required-labels"):
                required_labels.update(values["required-labels"])
        print(';'.join(required_tests))
        print(';'.join(required_regexps))
        print(';'.join(required_labels))
        return 0

    def get_features(self, target_branch):
        """
        Get changed file to `target_branch` and map them to the
        to-be-tested set-of-tests

        :param target_branch: branch/commit to compare to
        :returns: List of set-of-tests
        """
        cmd = ["git", "diff", "--name-only", f"origin/{target_branch}"]
        changed_files = [_.decode("utf-8")
                         for _ in subprocess.check_output(cmd).split(b'\n')
                         if _.strip()]
        print('\n'.join(changed_files), file=sys.stderr)
        enabled_features = set()
        # Go through lines and find what features should be covered
        for changed_file in changed_files:
            for regexp, features in self.paths.items():
                if regexp.search(changed_file):
                    for feature in features:
                        enabled_features.add(feature)
                    # this changed_file was treated, ignore other regexps
                    break
            else:
                # Untreated changed_file, run all tests
                return self.all_set_of_tests
        return enabled_features


if __name__ == "__main__":
    if len(sys.argv) == 2:
        _TESTS = sys.argv[1] == '-t'
    else:
        _TESTS = False
    sys.exit(Checks().run(_TESTS, os.getenv("TARGET_BRANCH", "main")))
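For a local preview of what the gating would do to a work-in-progress branch, the script can be run in both modes by hand. A small sketch, assuming a kata-containers checkout with the target branch fetched as origin/main and PyYAML installed:

# Sketch: run skips.py in both modes against origin/main.
import os
import subprocess

env = dict(os.environ, TARGET_BRANCH="main")
script = "tools/testing/gatekeeper/skips.py"

# Feature mode: prints skip_build/skip_test/skip_static as key=value lines.
print(subprocess.check_output(["python3", script], env=env, text=True))

# Gatekeeper mode (-t): prints required job names, regexps and labels,
# one ';'-separated list per line.
print(subprocess.check_output(["python3", script, "-t"], env=env, text=True))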