runs queue order manipulation and whitespace tightening

This commit is contained in:
Abel Luck 2026-03-31 10:23:46 +02:00
parent a88eba7dd1
commit 99fd33f770
10 changed files with 478 additions and 121 deletions

View file

@ -107,7 +107,7 @@ class JobRuntime:
self,
*,
log_dir: str | Path,
refresh_callback: Callable[[], None] | None = None,
refresh_callback: Callable[[object], None] | None = None,
graceful_stop_seconds: float = 15.0,
) -> None:
self.log_dir = Path(log_dir)
@ -117,6 +117,7 @@ class JobRuntime:
self._workers: dict[int, RunningWorker] = {}
self._run_lock = threading.Lock()
self._started = False
self._last_runtime_refresh_at = 0.0
def start(self) -> None:
if self._started:
@ -353,7 +354,7 @@ class JobRuntime:
)
worker.process.terminate()
self._trigger_refresh()
self._trigger_refresh("queue-reordered")
return True
def cancel_queued_execution(self, execution_id: int) -> bool:
@ -375,6 +376,75 @@ class JobRuntime:
self._trigger_refresh()
return True
def move_queued_execution(self, execution_id: int, *, direction: str) -> bool:
    """Move a queued (PENDING) execution one slot up or down in the queue.

    Queue order is derived from ``created_at`` ascending (primary key as a
    tie-breaker), so reordering is implemented by rewriting ``created_at``
    values rather than maintaining a dedicated position column.

    Args:
        execution_id: Primary key of the execution to move.
        direction: ``"up"`` moves the execution earlier in the queue; any
            other value moves it later.  # NOTE(review): only "up" is
            special-cased — confirm callers pass exactly "up"/"down".

    Returns:
        True when the execution was found and reordered; False when it is
        not in the pending queue or is already at the requested edge.
    """
    # "up" means earlier in the created_at ordering.
    offset = -1 if direction == "up" else 1
    with self._run_lock:
        with database.connection_context():
            # Peewee keeps the primary-key field on the model's _meta.
            execution_primary_key = getattr(JobExecution, "_meta").primary_key
            # Snapshot the queue in display order: oldest first, primary
            # key as a stable tie-breaker for equal timestamps.
            queued_executions = tuple(
                JobExecution.select()
                .where(JobExecution.running_status == JobExecutionStatus.PENDING)
                .order_by(
                    JobExecution.created_at.asc(), execution_primary_key.asc()
                )
            )
            current_index = next(
                (
                    index
                    for index, execution in enumerate(queued_executions)
                    if _execution_id(execution) == execution_id
                ),
                None,
            )
            if current_index is None:
                # Not pending any more (started, finished, or cancelled).
                return False
            target_index = current_index + offset
            if target_index < 0 or target_index >= len(queued_executions):
                # Already at the top/bottom of the queue; nothing to do.
                return False
            current_execution = queued_executions[current_index]
            target_execution = queued_executions[target_index]
            # created_at may come back as str or datetime depending on the
            # driver; normalize both before comparing.
            current_created_at = _coerce_datetime(
                cast(datetime | str, current_execution.created_at)
            )
            target_created_at = _coerce_datetime(
                cast(datetime | str, target_execution.created_at)
            )
            with database.atomic():
                if current_created_at == target_created_at:
                    # Equal timestamps: swapping would be a no-op, so nudge
                    # the moved row one microsecond past its neighbour in
                    # the requested direction.
                    adjusted_created_at = target_created_at + timedelta(
                        microseconds=-1 if offset < 0 else 1
                    )
                    (
                        JobExecution.update(created_at=adjusted_created_at)
                        .where(
                            execution_primary_key
                            == _execution_id(current_execution)
                        )
                        .execute()
                    )
                else:
                    # Distinct timestamps: swap them so the two rows trade
                    # places in the created_at ordering.
                    (
                        JobExecution.update(created_at=target_created_at)
                        .where(
                            execution_primary_key
                            == _execution_id(current_execution)
                        )
                        .execute()
                    )
                    (
                        JobExecution.update(created_at=current_created_at)
                        .where(
                            execution_primary_key == _execution_id(target_execution)
                        )
                        .execute()
                    )
    # NOTE(review): other call sites pass an event name (e.g.
    # "queue-reordered") to _trigger_refresh — consider doing so here too.
    self._trigger_refresh()
    return True
def set_job_enabled(self, job_id: int, *, enabled: bool) -> bool:
with database.connection_context():
with database.atomic():
@ -428,6 +498,8 @@ class JobRuntime:
if any_finished:
self._start_queued_jobs()
self._refresh_running_runtime()
def _apply_stats(self, worker: RunningWorker) -> None:
if not worker.artifacts.stats_path.exists():
return
@ -471,9 +543,27 @@ class JobRuntime:
):
worker.process.kill()
def _trigger_refresh(self, event: object = "refresh-event") -> None:
    """Invoke the configured refresh callback, if one is registered.

    The rendered span contained both the pre-change (arg-less) and
    post-change variants of this method; this consolidates to the updated
    version, which forwards an event payload to the callback.

    Args:
        event: Opaque payload passed through to ``refresh_callback``;
            defaults to the generic ``"refresh-event"`` marker.
    """
    if self.refresh_callback is not None:
        self.refresh_callback(event)
def _refresh_running_runtime(self) -> None:
    """Emit a throttled UI refresh while any execution is running.

    Does nothing when no execution is in RUNNING status, and rate-limits
    refreshes to at most one per second on the monotonic clock.
    """
    if not self._has_running_executions():
        return
    now = time.monotonic()
    elapsed = now - self._last_runtime_refresh_at
    if elapsed < 1.0:
        return
    self._last_runtime_refresh_at = now
    self._trigger_refresh()
def _has_running_executions(self) -> bool:
    """Return True when at least one execution is in RUNNING status."""
    running_query = JobExecution.select().where(
        JobExecution.running_status == JobExecutionStatus.RUNNING
    )
    return running_query.exists()
def _reconcile_stale_executions(self) -> None:
live_workers = _find_live_workers()
@ -614,13 +704,22 @@ def load_runs_view(
for execution in running_executions
),
"queued": tuple(
_project_queued_execution(execution, reference_time, position=position)
_project_queued_execution(
execution,
reference_time,
position=position,
total_count=len(queued_executions),
)
for position, execution in enumerate(queued_executions, start=1)
),
"upcoming": tuple(
_project_upcoming_job(job, running_by_job.get(job.id), reference_time)
_project_upcoming_job(
job,
running_by_job.get(job.id),
queued_by_job.get(job.id),
reference_time,
)
for job in jobs
if job.id not in queued_by_job
),
"completed": tuple(
_project_completed_execution(execution, resolved_log_dir, reference_time)
@ -772,15 +871,20 @@ def _project_running_execution(
def _project_queued_execution(
execution: JobExecution, reference_time: datetime, *, position: int
execution: JobExecution,
reference_time: datetime,
*,
position: int,
total_count: int,
) -> dict[str, object]:
job = cast(Job, execution.job)
queued_at = _coerce_datetime(cast(datetime | str, execution.created_at))
execution_id = _execution_id(execution)
return {
"source": job.source.name,
"slug": job.source.slug,
"job_id": _job_id(job),
"execution_id": _execution_id(execution),
"execution_id": execution_id,
"queued_at": _humanize_relative_time(reference_time, queued_at),
"queued_at_iso": queued_at.isoformat(),
"queue_position": position,
@ -789,14 +893,27 @@ def _project_queued_execution(
"run_label": "Queued",
"run_disabled": True,
"run_post_path": f"/actions/jobs/{_job_id(job)}/run-now",
"cancel_post_path": (
f"/actions/queued-executions/{_execution_id(execution)}/cancel"
"cancel_post_path": (f"/actions/queued-executions/{execution_id}/cancel"),
"move_up_disabled": position == 1,
"move_up_post_path": (
None
if position == 1
else f"/actions/queued-executions/{execution_id}/move-up"
),
"move_down_disabled": position == total_count,
"move_down_post_path": (
None
if position == total_count
else f"/actions/queued-executions/{execution_id}/move-down"
),
}
def _project_upcoming_job(
job: Job, running_execution: JobExecution | None, reference_time: datetime
job: Job,
running_execution: JobExecution | None,
queued_execution: JobExecution | None,
reference_time: datetime,
) -> dict[str, object]:
job_id = _job_id(job)
trigger = _job_trigger(job)
@ -805,6 +922,12 @@ def _project_upcoming_job(
if job.enabled and running_execution is None
else None
)
run_disabled = running_execution is not None or queued_execution is not None
run_reason = (
"Already running"
if running_execution is not None
else ("Queued" if queued_execution is not None else "Ready")
)
return {
"source": job.source.name,
"slug": job.source.slug,
@ -826,8 +949,8 @@ def _project_upcoming_job(
),
"enabled_label": "Enabled" if job.enabled else "Disabled",
"enabled_tone": "scheduled" if job.enabled else "idle",
"run_disabled": running_execution is not None,
"run_reason": "Already running" if running_execution is not None else "Ready",
"run_disabled": run_disabled,
"run_reason": run_reason,
"toggle_label": "Disable" if job.enabled else "Enable",
"toggle_enabled": not job.enabled,
"run_post_path": f"/actions/jobs/{job_id}/run-now",