ci: Ensure we check the latest workflow run in gatekeeper

with multiple iterations/reruns we need to use the latest run of each
workflow. For that we can use the "run_id" and only update results of
the same or newer run_ids.

To do that we need to store the "run_id". To avoid adding individual
attributes this commit stores the full job object that contains the
status, conclusion as well as other attributes of the individual jobs,
which might come in handy in the future in exchange for slightly bigger
memory overhead (still we only store the latest run of the required
jobs).

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
This commit is contained in:
Lukáš Doktor 2024-07-24 09:10:05 +02:00
parent 2ae090b44b
commit 63b6e8a215
No known key found for this signature in database
GPG Key ID: 26B362E47FCF22C1

View File

@ -63,8 +63,9 @@ class Checker:
self.required_regexps.append(re.compile(regexp)) self.required_regexps.append(re.compile(regexp))
if not required_jobs and not self.required_regexps: if not required_jobs and not self.required_regexps:
raise RuntimeError("No REQUIRED_JOBS or REQUIRED_REGEXPS defined") raise RuntimeError("No REQUIRED_JOBS or REQUIRED_REGEXPS defined")
# Set all required jobs as RUNNING to enforce waiting for them # Set all required jobs as EXPECTED to enforce waiting for them
self.results = {job: RUNNING for job in required_jobs} self.results = {job: {"status": "EXPECTED", "run_id": -1}
for job in required_jobs}
def record(self, workflow, job): def record(self, workflow, job):
""" """
@ -79,15 +80,25 @@ class Checker:
else: else:
# Not a required job # Not a required job
return return
# TODO: Check if multiple re-runs use the same "run_id". If so use
# job['run_attempt'] in case of matching "run_id".
elif job['run_id'] <= self.results[job_name]['run_id']:
# Newer results already stored
print(f"older {job_name} - {job['status']} {job['conclusion']} "
f"{job['id']}", file=sys.stderr)
return
print(f"{job_name} - {job['status']} {job['conclusion']} {job['id']}", print(f"{job_name} - {job['status']} {job['conclusion']} {job['id']}",
file=sys.stderr) file=sys.stderr)
self.results[job_name] = job
@staticmethod
def _job_status(job):
"""Map job status to our status"""
if job["status"] != "completed": if job["status"] != "completed":
self.results[job_name] = RUNNING return RUNNING
return
if job["conclusion"] != "success": if job["conclusion"] != "success":
self.results[job_name] = job['conclusion'] return job['conclusion']
return return PASS
self.results[job_name] = PASS
def status(self): def status(self):
""" """
@ -98,7 +109,8 @@ class Checker:
if not self.results: if not self.results:
# No results reported so far # No results reported so far
return FAIL return FAIL
for status in self.results.values(): for job in self.results.values():
status = self._job_status(job)
if status == RUNNING: if status == RUNNING:
running |= True running |= True
elif status != PASS: elif status != PASS:
@ -113,13 +125,14 @@ class Checker:
good = [] good = []
bad = [] bad = []
warn = [] warn = []
for job, status in self.results.items(): for name, job in self.results.items():
status = self._job_status(job)
if status == RUNNING: if status == RUNNING:
warn.append(f"WARN: {job} - Still running") warn.append(f"WARN: {name} - Still running")
elif status == PASS: elif status == PASS:
good.append(f"PASS: {job} - success") good.append(f"PASS: {name} - success")
else: else:
bad.append(f"FAIL: {job} - Not passed - {status}") bad.append(f"FAIL: {name} - Not passed - {status}")
out = '\n'.join(sorted(good) + sorted(warn) + sorted(bad)) out = '\n'.join(sorted(good) + sorted(warn) + sorted(bad))
stat = self.status() stat = self.status()
if stat == RUNNING: if stat == RUNNING:
@ -167,8 +180,7 @@ class Checker:
) )
response.raise_for_status() response.raise_for_status()
workflow_runs = response.json()["workflow_runs"] workflow_runs = response.json()["workflow_runs"]
for i, run in enumerate(workflow_runs):
for run in workflow_runs:
jobs = self.get_jobs_for_workflow_run(run["id"]) jobs = self.get_jobs_for_workflow_run(run["id"])
for job in jobs: for job in jobs:
self.record(run["name"], job) self.record(run["name"], job)
@ -184,9 +196,9 @@ class Checker:
while True: while True:
ret = self.check_workflow_runs_status() ret = self.check_workflow_runs_status()
if ret == RUNNING: if ret == RUNNING:
running_jobs = len([job running_jobs = len([name
for job, status in self.results.items() for name, job in self.results.items()
if status == RUNNING]) if self._job_status(job) == RUNNING])
print(f"{running_jobs} jobs are still running...") print(f"{running_jobs} jobs are still running...")
time.sleep(180) time.sleep(180)
continue continue