# Copyright (c) 2025 Patrick Motsch
|
|
# All rights reserved.
|
|
"""
|
|
System-level Workflow Dashboard API.
|
|
|
|
Provides cross-feature, cross-mandate access to workflow runs AND workflows
|
|
with RBAC scoping: user sees own runs/workflows, mandate admin sees mandate
|
|
runs/workflows, sysadmin sees all.
|
|
"""
|
|
|
|
import asyncio
|
|
import json
|
|
import logging
|
|
import math
|
|
import re
|
|
import time
|
|
from typing import Optional, List
|
|
from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException
|
|
from fastapi.responses import StreamingResponse
|
|
from slowapi import Limiter
|
|
from slowapi.util import get_remote_address
|
|
|
|
from modules.auth.authentication import getRequestContext, RequestContext
|
|
from modules.interfaces.interfaceDbApp import getRootInterface
|
|
from modules.connectors.connectorDbPostgre import DatabaseConnector
|
|
from modules.shared.configuration import APP_CONFIG
|
|
from modules.datamodels.datamodelPagination import PaginationParams, normalize_pagination_dict
|
|
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
|
|
AutoRun, AutoStepLog, AutoWorkflow, AutoTask, AutoVersion,
|
|
)
|
|
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import graphicalEditorDatabase
|
|
from modules.shared.i18nRegistry import apiRouteContext
|
|
|
|
routeApiMsg = apiRouteContext("routeWorkflowDashboard")
|
|
|
|
logger = logging.getLogger(__name__)
|
|
limiter = Limiter(key_func=get_remote_address)
|
|
|
|
router = APIRouter(prefix="/api/system/workflow-runs", tags=["WorkflowDashboard"])
|
|
|
|
|
|
def _getDb() -> DatabaseConnector:
    """Build a connector bound to the graphical-editor database.

    Credentials come from APP_CONFIG; the secret-store password takes
    precedence over the plain one. The connector is user-agnostic
    (userId=None) because RBAC is enforced at the route level.
    """
    password = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
    return DatabaseConnector(
        dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
        dbDatabase=graphicalEditorDatabase,
        dbUser=APP_CONFIG.get("DB_USER"),
        dbPassword=password,
        dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
        userId=None,
    )
|
|
|
|
|
|
def _getUserMandateIds(userId: str) -> list[str]:
    """Return the IDs of all enabled mandates the user is a member of."""
    memberships = getRootInterface().getUserMandates(userId)
    return [
        membership.mandateId
        for membership in memberships
        if membership.mandateId and membership.enabled
    ]
|
|
|
|
|
|
def _getAdminMandateIds(userId: str, mandateIds: list) -> list:
    """Batch-check which mandates the user is admin for (2 SQL queries total)."""
    if not mandateIds:
        return []

    rootIface = getRootInterface()
    from modules.datamodels.datamodelMembership import UserMandateRole

    # Query 1: all of the user's role assignments within the given mandates.
    assignments = rootIface.db.getRecordset(UserMandateRole, recordFilter={
        "userId": userId, "mandateId": mandateIds,
    })
    if not assignments:
        return []

    # Map roleId -> set of mandates where that role was assigned to the user.
    roleToMandate: dict = {}
    for assignment in assignments:
        data = assignment if isinstance(assignment, dict) else assignment.__dict__
        roleId = data.get("roleId")
        if roleId:
            roleToMandate.setdefault(roleId, set()).add(data.get("mandateId"))

    if not roleToMandate:
        return []

    # Query 2: which of those roles carry the admin flag.
    from modules.datamodels.datamodelRbac import MandateRole
    roleRecords = rootIface.db.getRecordset(
        MandateRole, recordFilter={"id": list(roleToMandate.keys())}
    )

    adminMandates: set = set()
    for roleRecord in (roleRecords or []):
        data = roleRecord if isinstance(roleRecord, dict) else roleRecord.__dict__
        if data.get("isAdmin"):
            adminMandates.update(roleToMandate.get(data.get("id"), set()))

    # Preserve the caller's ordering of mandateIds in the result.
    return [mid for mid in mandateIds if mid in adminMandates]
|
|
|
|
|
|
def _isUserMandateAdmin(userId: str, mandateId: str) -> bool:
    """Check if user is admin for a specific mandate."""
    # Delegates to the batch helper with a single-element list.
    return mandateId in _getAdminMandateIds(userId, [mandateId])
|
|
|
|
|
|
def _scopedRunFilter(context: RequestContext) -> Optional[dict]:
    """
    Build a DB filter dict based on RBAC:
    - sysadmin: None (no filter)
    - mandate admin: mandateId IN user's mandates
    - normal user: ownerId = userId
    """
    if context.isPlatformAdmin:
        return None

    userId = str(context.user.id) if context.user else None
    if not userId:
        # No identifiable user: a filter that can never match any row.
        return {"ownerId": "__impossible__"}

    adminMandateIds = _getAdminMandateIds(userId, _getUserMandateIds(userId))
    if adminMandateIds:
        return {"mandateId": adminMandateIds}
    return {"ownerId": userId}
|
|
|
|
|
|
def _scopedWorkflowFilter(context: RequestContext) -> Optional[dict]:
    """
    Build a DB filter for AutoWorkflow based on RBAC:
    - sysadmin: None (no filter, sees all)
    - normal user: mandateId IN user's mandates
    """
    if context.isPlatformAdmin:
        return None

    userId = str(context.user.id) if context.user else None
    if not userId:
        # Anonymous: filter that never matches.
        return {"mandateId": "__impossible__"}

    memberMandateIds = _getUserMandateIds(userId)
    if not memberMandateIds:
        # User has no mandate memberships: nothing is visible.
        return {"mandateId": "__impossible__"}
    return {"mandateId": memberMandateIds}
|
|
|
|
|
|
def _userMayDeleteWorkflow(context: RequestContext, wfMandateId: Optional[str]) -> bool:
    """Same rules as canDelete on rows in get_system_workflows."""
    if context.isPlatformAdmin:
        return True
    userId = str(context.user.id) if context.user else None
    if not userId or not wfMandateId:
        return False
    # Mandate admins may delete workflows belonging to their mandate.
    memberMandateIds = _getUserMandateIds(userId)
    return wfMandateId in _getAdminMandateIds(userId, memberMandateIds)
|
|
|
|
|
|
def _parsePaginationOr400(pagination: Optional[str]) -> Optional[PaginationParams]:
    """Parse a JSON pagination query string into PaginationParams.

    Returns None when the input is empty/None. Raises HTTPException(400) on any
    parse / validation error so the caller can propagate the error to the
    client instead of silently falling back to defaults (which used to mask
    real frontend bugs).
    """
    if not pagination:
        return None
    try:
        paginationDict = json.loads(pagination)
    except json.JSONDecodeError as e:
        # Chain the decode error so tracebacks/logs show the root cause.
        raise HTTPException(
            status_code=400,
            detail=f"Invalid 'pagination' query: not valid JSON ({e.msg})",
        ) from e
    if not paginationDict:
        # "{}", "null", "0" etc. — treat as "no pagination supplied".
        return None
    try:
        paginationDict = normalize_pagination_dict(paginationDict)
        return PaginationParams(**paginationDict)
    except Exception as e:
        # Pydantic validation error (or normalizer failure) → client error.
        raise HTTPException(
            status_code=400,
            detail=f"Invalid 'pagination' payload: {e}",
        ) from e
|
|
|
|
|
|
_RUN_STATS_SUBQUERY = """
|
|
(
|
|
SELECT s."workflowId" AS "workflowId",
|
|
MAX(COALESCE(s."startedAt", s."sysCreatedAt")) AS "lastStartedAt",
|
|
COUNT(s."id")::bigint AS "runCount",
|
|
MAX(CASE WHEN s."status" IN ('running', 'paused') THEN s."id" END) AS "activeRunId"
|
|
FROM "AutoRun" s
|
|
GROUP BY s."workflowId"
|
|
) rs
|
|
"""
|
|
|
|
|
|
def _firstFkSortFieldForWorkflows(pagination) -> Optional[str]:
    """First sort field that requires FK label resolution (cross-DB), or None."""
    from modules.routes.routeHelpers import _buildLabelResolversFromModel

    if not pagination or not pagination.sort:
        return None
    resolvers = _buildLabelResolversFromModel(AutoWorkflow)
    if not resolvers:
        return None
    for sortEntry in pagination.sort:
        # Sort entries may be plain dicts or objects with a .field attribute.
        if isinstance(sortEntry, dict):
            field = sortEntry.get("field")
        else:
            field = getattr(sortEntry, "field", None)
        if field and field in resolvers:
            return field
    return None
|
|
|
|
|
|
def _batchRunStatsForWorkflowIds(db: DatabaseConnector, workflowIds: List[str]) -> dict:
    """One grouped query: lastStartedAt, runCount, activeRunId per workflow."""
    if not workflowIds or not db._ensureTableExists(AutoRun):
        return {}
    db._ensure_connection()
    sql = """
        SELECT "workflowId",
               MAX(COALESCE("startedAt", "sysCreatedAt")) AS "lastStartedAt",
               COUNT("id")::bigint AS "runCount",
               MAX(CASE WHEN "status" IN ('running', 'paused') THEN "id" END) AS "activeRunId"
        FROM "AutoRun"
        WHERE "workflowId" = ANY(%s)
        GROUP BY "workflowId"
    """
    statsByWorkflow: dict = {}
    with db.connection.cursor() as cursor:
        cursor.execute(sql, (workflowIds,))
        for raw in cursor.fetchall():
            stats = dict(raw)
            workflowId = stats.get("workflowId")
            if workflowId:
                # Keys are stringified so callers can look up by str(id).
                statsByWorkflow[str(workflowId)] = stats
    return statsByWorkflow
|
|
|
|
|
|
def _listingColSql(key: str, wfFieldNames: set) -> Optional[str]:
|
|
if key == "lastStartedAt":
|
|
return 'rs."lastStartedAt"'
|
|
if key == "runCount":
|
|
return 'COALESCE(rs."runCount", 0::bigint)'
|
|
if key == "isRunning":
|
|
return '(rs."activeRunId" IS NOT NULL)'
|
|
if key in wfFieldNames:
|
|
return f'w."{key}"'
|
|
return None
|
|
|
|
|
|
def _listingOrderExpr(key: str, wfFieldNames: set, wfFields: dict) -> Optional[str]:
|
|
if key == "lastStartedAt":
|
|
return 'rs."lastStartedAt"'
|
|
if key == "runCount":
|
|
return 'COALESCE(rs."runCount", 0::bigint)'
|
|
if key == "isRunning":
|
|
return 'CASE WHEN rs."activeRunId" IS NOT NULL THEN 1 ELSE 0 END'
|
|
if key in wfFieldNames:
|
|
colType = wfFields.get(key, "TEXT")
|
|
if colType == "BOOLEAN":
|
|
return f'COALESCE(w."{key}", FALSE)'
|
|
return f'w."{key}"'
|
|
return None
|
|
|
|
|
|
def _appendJoinedListingFilters(whereParts: list, values: list, pagination, wfFields: dict) -> None:
    """Append WHERE fragments for joined workflow listing (w + rs).

    Mutates `whereParts` (SQL fragments) and `values` (matching %s
    parameters, in the same order) in place. Supported filter shapes per
    key in pagination.filters:
      - "search": ILIKE across all TEXT workflow columns (OR-combined)
      - None: IS NULL / empty semantics (special-cased per computed column)
      - scalar: boolean equality or ILIKE text match
      - dict: {"operator": ..., "value": ...} with equals/contains/
        startsWith/endsWith/gt/gte/lt/lte/between

    Keys that are neither workflow columns nor computed stats
    (lastStartedAt, runCount, isRunning) are silently ignored.
    """
    from datetime import datetime as _dt, timezone as _tz

    wfFieldNames = set(wfFields.keys())
    validCols = wfFieldNames | {"lastStartedAt", "runCount", "isRunning"}

    if not pagination or not pagination.filters:
        return

    for key, val in pagination.filters.items():
        # Free-text search: OR over every TEXT column of the workflow table.
        if key == "search" and isinstance(val, str) and val.strip():
            term = f"%{val.strip()}%"
            textCols = [c for c, t in wfFields.items() if t == "TEXT"]
            if textCols:
                orParts = [f'COALESCE(w."{c}"::TEXT, \'\') ILIKE %s' for c in textCols]
                whereParts.append(f"({' OR '.join(orParts)})")
                values.extend([term] * len(textCols))
            continue

        if key not in validCols:
            continue

        # isRunning is derived from rs."activeRunId"; it takes no bind values.
        if key == "isRunning":
            if isinstance(val, dict):
                op = val.get("operator", "equals")
                v = val.get("value", "")
                isTrue = str(v).lower() == "true"
                # Only equality makes sense for a boolean; other operators
                # are ignored on purpose.
                if op in ("equals", "eq"):
                    whereParts.append('(rs."activeRunId" IS NOT NULL)' if isTrue else '(rs."activeRunId" IS NULL)')
            elif val is None:
                whereParts.append('(rs."activeRunId" IS NULL)')
            else:
                whereParts.append(
                    '(rs."activeRunId" IS NOT NULL)' if str(val).lower() == "true" else '(rs."activeRunId" IS NULL)'
                )
            continue

        colRef = _listingColSql(key, wfFieldNames)
        if not colRef:
            continue

        # Effective column type: real workflow column type, or a synthetic
        # type for the computed stats columns.
        colType = wfFields.get(key, "TEXT") if key in wfFieldNames else (
            "DOUBLE PRECISION" if key == "lastStartedAt" else "BIGINT" if key == "runCount" else "TEXT"
        )

        # Explicit None filter: "no value" semantics per column.
        if val is None:
            if key == "lastStartedAt":
                whereParts.append(f'({colRef} IS NULL)')
            elif key == "runCount":
                # No runs aggregates to 0, not NULL (COALESCE in colRef).
                whereParts.append(f'({colRef} = 0)')
            else:
                whereParts.append(f'({colRef} IS NULL OR {colRef}::TEXT = \'\')')
            continue

        # Bare scalar filter (no operator dict): equality for booleans,
        # case-insensitive match otherwise.
        if not isinstance(val, dict):
            # NOTE(review): "key == 'isRunning'" is unreachable here — that
            # key was fully handled (with `continue`) above.
            if colType == "BOOLEAN" or key == "isRunning":
                whereParts.append(f'COALESCE({colRef}, FALSE) = %s')
                values.append(str(val).lower() == "true")
            else:
                whereParts.append(f'{colRef}::TEXT ILIKE %s')
                values.append(str(val))
            continue

        # Operator dict: {"operator": ..., "value": ...}.
        op = val.get("operator", "equals")
        v = val.get("value", "")
        if op in ("equals", "eq"):
            if colType == "BOOLEAN":
                whereParts.append(f'COALESCE({colRef}, FALSE) = %s')
                values.append(str(v).lower() == "true")
            else:
                whereParts.append(f'{colRef}::TEXT = %s')
                values.append(str(v))
        elif op == "contains":
            whereParts.append(f'{colRef}::TEXT ILIKE %s')
            values.append(f"%{v}%")
        elif op == "startsWith":
            whereParts.append(f'{colRef}::TEXT ILIKE %s')
            values.append(f"{v}%")
        elif op == "endsWith":
            whereParts.append(f'{colRef}::TEXT ILIKE %s')
            values.append(f"%{v}")
        elif op in ("gt", "gte", "lt", "lte"):
            sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op]
            if colType in ("INTEGER", "DOUBLE PRECISION", "BIGINT") or key in ("lastStartedAt", "runCount"):
                # Numeric comparison; non-numeric filter values are dropped.
                try:
                    whereParts.append(f'{colRef}::double precision {sqlOp} %s')
                    values.append(float(v))
                except (ValueError, TypeError):
                    continue
            else:
                whereParts.append(f'{colRef}::TEXT {sqlOp} %s')
                values.append(str(v))
        elif op == "between":
            fromVal = v.get("from", "") if isinstance(v, dict) else ""
            toVal = v.get("to", "") if isinstance(v, dict) else ""
            if not fromVal and not toVal:
                continue
            isNumericCol = colType in ("INTEGER", "DOUBLE PRECISION", "BIGINT") or key in ("lastStartedAt", "runCount")
            # A YYYY-MM-DD bound against a numeric column means the column
            # stores an epoch timestamp; convert day bounds to UTC epochs
            # (from = 00:00:00, to = 23:59:59).
            isDateVal = bool(fromVal and re.match(r"^\d{4}-\d{2}-\d{2}$", str(fromVal))) or bool(
                toVal and re.match(r"^\d{4}-\d{2}-\d{2}$", str(toVal))
            )
            if isNumericCol and isDateVal:
                if fromVal and toVal:
                    fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp()
                    toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace(
                        hour=23, minute=59, second=59, tzinfo=_tz.utc
                    ).timestamp()
                    whereParts.append(f"({colRef} >= %s AND {colRef} <= %s)")
                    values.extend([fromTs, toTs])
                elif fromVal:
                    fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp()
                    whereParts.append(f"({colRef} >= %s)")
                    values.append(fromTs)
                else:
                    toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace(
                        hour=23, minute=59, second=59, tzinfo=_tz.utc
                    ).timestamp()
                    whereParts.append(f"({colRef} <= %s)")
                    values.append(toTs)
            elif isNumericCol:
                # Plain numeric range; non-numeric bounds drop the filter.
                try:
                    if fromVal and toVal:
                        whereParts.append(
                            f"({colRef}::double precision >= %s AND {colRef}::double precision <= %s)"
                        )
                        values.extend([float(fromVal), float(toVal)])
                    elif fromVal:
                        whereParts.append(f"{colRef}::double precision >= %s")
                        values.append(float(fromVal))
                    elif toVal:
                        whereParts.append(f"{colRef}::double precision <= %s")
                        values.append(float(toVal))
                except (ValueError, TypeError):
                    continue
            else:
                # Lexicographic text range.
                if fromVal and toVal:
                    whereParts.append(f"({colRef}::TEXT >= %s AND {colRef}::TEXT <= %s)")
                    values.extend([str(fromVal), str(toVal)])
                elif fromVal:
                    whereParts.append(f"{colRef}::TEXT >= %s")
                    values.append(str(fromVal))
                elif toVal:
                    whereParts.append(f"{colRef}::TEXT <= %s")
                    values.append(str(toVal))
|
|
|
|
|
|
def _buildJoinedWorkflowWhereOrderLimit(
    recordFilter: dict,
    pagination,
    wfFields: dict,
) -> tuple:
    """WHERE / ORDER BY / LIMIT for joined AutoWorkflow + run stats listing.

    Returns (whereClause, orderClause, limitClause, values) where `values`
    holds the bind parameters matching the %s placeholders in whereClause.
    """
    wfFieldNames = set(wfFields.keys())
    whereParts: list = []
    values: list = []

    # Base record filter: None → IS NULL, list → ANY(...), scalar → equality.
    for column, criterion in (recordFilter or {}).items():
        if criterion is None:
            whereParts.append(f'w."{column}" IS NULL')
        elif isinstance(criterion, list):
            whereParts.append(f'w."{column}" = ANY(%s)')
            values.append(criterion)
        else:
            whereParts.append(f'w."{column}" = %s')
            values.append(criterion)

    # User-supplied pagination filters (search / operator dicts / scalars).
    _appendJoinedListingFilters(whereParts, values, pagination, wfFields)

    whereClause = f" WHERE {' AND '.join(whereParts)}" if whereParts else ""

    orderParts: list = []
    if pagination and pagination.sort:
        for sortEntry in pagination.sort:
            if isinstance(sortEntry, dict):
                field = sortEntry.get("field")
                rawDirection = sortEntry.get("direction", "asc")
            else:
                field = getattr(sortEntry, "field", None)
                rawDirection = getattr(sortEntry, "direction", "asc")
            if not field:
                continue
            expr = _listingOrderExpr(field, wfFieldNames, wfFields)
            if not expr:
                continue
            direction = "DESC" if str(rawDirection).lower() == "desc" else "ASC"
            orderParts.append(f"{expr} {direction} NULLS LAST")
    if not orderParts:
        # Stable default: newest workflows first.
        orderParts.append('w."sysCreatedAt" DESC NULLS LAST')

    orderClause = " ORDER BY " + ", ".join(orderParts)

    limitClause = ""
    if pagination:
        # page/pageSize are validated ints from PaginationParams, so inline
        # interpolation here is safe.
        skipped = (pagination.page - 1) * pagination.pageSize
        limitClause = f" LIMIT {pagination.pageSize} OFFSET {skipped}"

    return whereClause, orderClause, limitClause, values
|
|
|
|
|
|
def _getWorkflowsJoinedPaginated(
    db: DatabaseConnector,
    recordFilter: dict,
    paginationParams: PaginationParams,
) -> dict:
    """SQL listing: AutoWorkflow LEFT JOIN aggregated AutoRun stats (one query + count).

    Returns {"items": [...], "totalItems": int, "totalPages": int}. Each item
    is the workflow row plus rs."lastStartedAt"/"runCount"/"activeRunId" from
    the run-stats subquery.
    """
    from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields

    wfFields = getModelFields(AutoWorkflow)
    whereClause, orderClause, limitClause, values = _buildJoinedWorkflowWhereOrderLimit(
        recordFilter, paginationParams, wfFields,
    )
    # Copy the bind values: the count query reuses the same WHERE parameters.
    countValues = list(values)

    fromSql = f'"AutoWorkflow" w LEFT JOIN {_RUN_STATS_SUBQUERY.strip()} ON rs."workflowId" = w."id"'

    countSql = f"SELECT COUNT(*) AS cnt FROM {fromSql}{whereClause}"
    dataSql = f"SELECT w.*, rs.\"lastStartedAt\", rs.\"runCount\", rs.\"activeRunId\" FROM {fromSql}{whereClause}{orderClause}{limitClause}"

    db._ensure_connection()
    with db.connection.cursor() as cursor:
        cursor.execute(countSql, countValues)
        totalItems = int(cursor.fetchone()["cnt"])

        cursor.execute(dataSql, values)
        rawRows = [dict(row) for row in cursor.fetchall()]

    # max(totalItems, 1) guards against division by zero when unpaginated.
    pageSize = paginationParams.pageSize if paginationParams else max(totalItems, 1)
    totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0

    modelFields = AutoWorkflow.model_fields
    for record in rawRows:
        parseRecordFields(record, wfFields, "table AutoWorkflow joined listing")
        # Pydantic models expect list/dict for JSONB fields; replace SQL NULL
        # with the empty container matching the field's annotation so model
        # validation downstream does not fail on None.
        for fieldName, fieldType in wfFields.items():
            if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
                fieldInfo = modelFields.get(fieldName)
                if fieldInfo:
                    fieldAnnotation = fieldInfo.annotation
                    if fieldAnnotation == list or (
                        hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is list
                    ):
                        record[fieldName] = []
                    elif fieldAnnotation == dict or (
                        hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is dict
                    ):
                        record[fieldName] = {}

    return {"items": rawRows, "totalItems": totalItems, "totalPages": totalPages}
|
|
|
|
|
|
def _cascadeDeleteAutoWorkflow(db: DatabaseConnector, workflowId: str) -> None:
    """Delete AutoWorkflow and dependent rows (same order as interfaceDbApp._cascadeDeleteGraphicalEditorData)."""
    # 1) Versions of the workflow.
    for version in db.getRecordset(AutoVersion, recordFilter={"workflowId": workflowId}) or []:
        versionId = version.get("id")
        if versionId:
            db.recordDelete(AutoVersion, versionId)

    # 2) Runs — each run's step logs first, then the run itself.
    for run in db.getRecordset(AutoRun, recordFilter={"workflowId": workflowId}) or []:
        runId = run.get("id")
        if not runId:
            continue
        for stepLog in db.getRecordset(AutoStepLog, recordFilter={"runId": runId}) or []:
            stepLogId = stepLog.get("id")
            if stepLogId:
                db.recordDelete(AutoStepLog, stepLogId)
        db.recordDelete(AutoRun, runId)

    # 3) Tasks, then finally the workflow row.
    for task in db.getRecordset(AutoTask, recordFilter={"workflowId": workflowId}) or []:
        taskId = task.get("id")
        if taskId:
            db.recordDelete(AutoTask, taskId)

    db.recordDelete(AutoWorkflow, workflowId)
|
|
|
|
|
|
@router.get("")
@limiter.limit("60/minute")
def get_workflow_runs(
    request: Request,
    limit: int = Query(50, ge=1, le=200),
    offset: int = Query(0, ge=0),
    status: Optional[str] = Query(None, description="Filter by status"),
    mandateId: Optional[str] = Query(None, description="Filter by mandate"),
    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
    mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
    column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """List workflow runs with RBAC scoping (SQL-paginated).

    Modes:
      - default: paginated run rows plus workflow/mandate/instance labels,
        returned as {"runs", "total", "limit", "offset"}
      - mode=filterValues: distinct values of `column` (400 without `column`)
      - mode=ids: all run IDs matching the current filters

    Raises HTTPException(400) for an invalid `pagination` payload.
    """
    db = _getDb()
    # Fresh install: the AutoRun table may not exist yet — return an empty
    # result in the shape appropriate for the requested mode.
    if not db._ensureTableExists(AutoRun):
        if mode in ("filterValues", "ids"):
            from fastapi.responses import JSONResponse
            return JSONResponse(content=[])
        return {"runs": [], "total": 0, "limit": limit, "offset": offset}

    if mode == "filterValues":
        if not column:
            from fastapi import HTTPException as _H
            raise _H(status_code=400, detail="column parameter required for mode=filterValues")
        return _enrichedFilterValues(db, context, AutoRun, _scopedRunFilter, column)

    if mode == "ids":
        from modules.routes.routeHelpers import handleIdsMode
        baseFilter = _scopedRunFilter(context)
        recordFilter = dict(baseFilter) if baseFilter else {}
        return handleIdsMode(db, AutoRun, pagination, recordFilter)

    # RBAC scope plus optional status / mandate query filters.
    baseFilter = _scopedRunFilter(context)
    recordFilter = dict(baseFilter) if baseFilter else {}

    if status:
        recordFilter["status"] = status
    if mandateId:
        recordFilter["mandateId"] = mandateId

    paginationParams = _parsePaginationOr400(pagination)
    if not paginationParams:
        # Legacy limit/offset fallback; default sort = newest started first.
        page = (offset // limit) + 1 if limit > 0 else 1
        paginationParams = PaginationParams(
            page=page,
            pageSize=limit,
            sort=[{"field": "startedAt", "direction": "desc"}],
        )

    from modules.routes.routeHelpers import getRecordsetPaginatedWithFkSort
    result = getRecordsetPaginatedWithFkSort(
        db, AutoRun,
        pagination=paginationParams,
        recordFilter=recordFilter if recordFilter else None,
    )
    # Helper may return a dict or a result object — support both shapes.
    pageRuns = result.get("items", []) if isinstance(result, dict) else result.items
    total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems

    # Batch-load the workflows referenced by this page of runs (one query).
    wfIds = list({r.get("workflowId") for r in pageRuns if r.get("workflowId")})
    wfMap: dict = {}
    if wfIds and db._ensureTableExists(AutoWorkflow):
        wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds})
        for wf in (wfs or []):
            wfMap[wf.get("id")] = wf

    from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels

    runs = []
    for r in pageRuns:
        row = dict(r)
        wfId = row.get("workflowId")
        wf = wfMap.get(wfId, {})
        # Display label: run's own label, else workflow label, else raw id.
        row["workflowLabel"] = (
            row.get("label")
            or (wf.get("label") if isinstance(wf, dict) else None)
            or wfId
        )
        fiid = wf.get("featureInstanceId") if isinstance(wf, dict) else None
        row["featureInstanceId"] = fiid
        runs.append(row)

    # Resolve FK ids into human-readable labels ("<fk>Label" keys), then
    # rename to the field names the frontend expects.
    enrichRowsWithFkLabels(
        runs,
        labelResolvers={
            "mandateId": resolveMandateLabels,
            "featureInstanceId": resolveInstanceLabels,
        },
    )
    for row in runs:
        row["instanceLabel"] = row.pop("featureInstanceIdLabel", None)
        row["mandateLabel"] = row.pop("mandateIdLabel", None)

    return {"runs": runs, "total": total, "limit": limit, "offset": offset}
|
|
|
|
|
|
@router.get("/metrics")
@limiter.limit("60/minute")
def get_workflow_metrics(
    request: Request,
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Aggregated metrics across all accessible workflow runs (SQL COUNT).

    Uses the same RBAC scoping as the runs list and workflows list
    so that metric cards always match the table data.

    Returns totalRuns, runsByStatus, totalTokens, totalCredits,
    workflowCount and activeWorkflows.
    """
    db = _getDb()

    # --- Workflow counts (same filter as /workflows endpoint) ---
    workflowCount = 0
    activeWorkflows = 0
    if db._ensureTableExists(AutoWorkflow):
        wfBaseFilter = _scopedWorkflowFilter(context)
        wfFilter = dict(wfBaseFilter) if wfBaseFilter else {}
        wfFilter["isTemplate"] = False

        # pageSize=1 fetches only totalItems cheaply (COUNT under the hood).
        wfCount = db.getRecordsetPaginated(
            AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
            recordFilter=wfFilter if wfFilter else None,
        )
        workflowCount = wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems

        activeFilter = dict(wfFilter)
        activeFilter["active"] = True
        activeCount = db.getRecordsetPaginated(
            AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
            recordFilter=activeFilter,
        )
        activeWorkflows = activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems

    # --- Run counts (same filter as /runs endpoint) ---
    if not db._ensureTableExists(AutoRun):
        return {
            "totalRuns": 0, "runsByStatus": {}, "totalTokens": 0,
            "totalCredits": 0, "workflowCount": workflowCount,
            "activeWorkflows": activeWorkflows,
        }

    runBaseFilter = _scopedRunFilter(context)

    countResult = db.getRecordsetPaginated(
        AutoRun, pagination=PaginationParams(page=1, pageSize=1),
        recordFilter=runBaseFilter,
    )
    totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems

    # NOTE(review): one count query per distinct status (N+1). Acceptable
    # while the status set is tiny; a single GROUP BY would be cheaper.
    runsByStatus: dict = {}
    statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=runBaseFilter)
    for sv in (statusValues or []):
        statusFilter = dict(runBaseFilter) if runBaseFilter else {}
        statusFilter["status"] = sv
        sr = db.getRecordsetPaginated(
            AutoRun, pagination=PaginationParams(page=1, pageSize=1),
            recordFilter=statusFilter,
        )
        runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems

    # Token/credit totals are summed in Python; capped at 10k runs to avoid
    # loading an unbounded recordset (beyond the cap both stay 0).
    totalTokens = 0
    totalCredits = 0.0
    if 0 < totalRuns <= 10000:
        allRuns = db.getRecordset(AutoRun, recordFilter=runBaseFilter, fieldFilter=["costTokens", "costCredits"]) or []
        for r in allRuns:
            totalTokens += r.get("costTokens", 0) or 0
            totalCredits += r.get("costCredits", 0.0) or 0.0

    return {
        "totalRuns": totalRuns,
        "runsByStatus": runsByStatus,
        "totalTokens": totalTokens,
        "totalCredits": round(totalCredits, 4),
        "workflowCount": workflowCount,
        "activeWorkflows": activeWorkflows,
    }
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# System-level Workflow listing (all workflows the user can see via RBAC)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@router.get("/workflows")
@limiter.limit("60/minute")
def get_system_workflows(
    request: Request,
    active: Optional[bool] = Query(None, description="Filter by active status"),
    mandateId: Optional[str] = Query(None, description="Filter by mandate"),
    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
    mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
    column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """List all workflows the user has access to (RBAC-scoped, cross-instance).

    Two listing strategies are used:
      - a sort field needing FK label resolution → fetch via
        getRecordsetPaginatedWithFkSort, then attach run stats per page;
        computed fields (lastStartedAt/runCount/isRunning) are filtered and
        sorted in Python over an oversized fetch (pageSize=9999)
      - otherwise → a single joined SQL query via _getWorkflowsJoinedPaginated

    Each row gains isRunning/activeRunId/runCount/lastStartedAt, permission
    flags (canEdit/canDelete/canExecute), label fields, and featureCode; the
    heavy "graph" payload is stripped.
    """
    db = _getDb()
    # Fresh install: table may not exist yet.
    if not db._ensureTableExists(AutoWorkflow):
        if mode in ("filterValues", "ids"):
            from fastapi.responses import JSONResponse
            return JSONResponse(content=[])
        return {"items": [], "pagination": {"currentPage": 1, "pageSize": 25, "totalItems": 0, "totalPages": 0}}

    if mode == "filterValues":
        if not column:
            from fastapi import HTTPException as _H
            raise _H(status_code=400, detail="column parameter required for mode=filterValues")
        return _enrichedFilterValues(db, context, AutoWorkflow, _scopedWorkflowFilter, column)

    if mode == "ids":
        from modules.routes.routeHelpers import handleIdsMode
        baseFilter = _scopedWorkflowFilter(context)
        recordFilter = dict(baseFilter) if baseFilter else {}
        recordFilter["isTemplate"] = False
        return handleIdsMode(db, AutoWorkflow, pagination, recordFilter)

    # RBAC scope + exclude templates + optional query filters.
    baseFilter = _scopedWorkflowFilter(context)
    recordFilter = dict(baseFilter) if baseFilter else {}
    recordFilter["isTemplate"] = False

    if active is not None:
        recordFilter["active"] = active
    if mandateId:
        recordFilter["mandateId"] = mandateId

    paginationParams = _parsePaginationOr400(pagination)
    if not paginationParams:
        paginationParams = PaginationParams(
            page=1,
            pageSize=25,
            sort=[{"field": "sysCreatedAt", "direction": "desc"}],
        )

    # resolveInstanceLabels is imported for parity with the runs endpoint but
    # superseded here by the featureCode-collecting resolver below.
    from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels

    # Side channel filled by the resolver: instanceId -> featureCode.
    featureCodeMap: dict = {}

    def _resolveInstanceLabelsWithFeatureCode(ids):
        # Label resolver that additionally records each instance's
        # featureCode into featureCodeMap (closure) for later row decoration.
        from modules.interfaces.interfaceDbApp import getRootInterface as _getRI
        from modules.interfaces.interfaceFeatures import getFeatureInterface
        rootIf = _getRI()
        featureIf = getFeatureInterface(rootIf.db)
        result = {}
        for iid in ids:
            fi = featureIf.getFeatureInstance(iid)
            if fi:
                result[iid] = fi.label or None
                featureCodeMap[iid] = fi.featureCode
            else:
                # Broken FK — workflow points at a deleted feature instance.
                logger.warning("getSystemWorkflows: feature-instance not found for id=%s", iid)
                result[iid] = None
        return result

    # Precompute admin mandates once; drives the per-row permission flags.
    userId = str(context.user.id) if context.user else None
    adminMandateIds = []
    if userId and not context.isPlatformAdmin:
        userMandateIds = _getUserMandateIds(userId)
        adminMandateIds = _getAdminMandateIds(userId, userMandateIds)

    fkSortField = _firstFkSortFieldForWorkflows(paginationParams)
    if fkSortField:
        # Strategy A: FK-label sort — cannot be done in the joined SQL query.
        from modules.routes.routeHelpers import getRecordsetPaginatedWithFkSort, applyFiltersAndSort
        _COMPUTED_FIELDS = {"lastStartedAt", "runCount", "isRunning"}
        hasComputedFilter = bool(
            paginationParams.filters
            and any(k in _COMPUTED_FIELDS for k in paginationParams.filters)
        )
        hasComputedSort = any(
            (s.field if hasattr(s, "field") else s.get("field", "")) in _COMPUTED_FIELDS
            for s in (paginationParams.sort or [])
        )
        dbPagination = paginationParams
        if hasComputedFilter or hasComputedSort:
            # Computed fields only exist after stats enrichment, so strip
            # them from the DB query and over-fetch (pageSize=9999); they are
            # re-applied in Python below via applyFiltersAndSort.
            dbFilters = {
                k: v for k, v in (paginationParams.filters or {}).items()
                if k not in _COMPUTED_FIELDS
            } or None
            dbSort = [
                s for s in (paginationParams.sort or [])
                if (s.field if hasattr(s, "field") else s.get("field", "")) not in _COMPUTED_FIELDS
            ]
            dbPagination = PaginationParams.model_construct(
                page=1,
                pageSize=9999,
                sort=dbSort or [{"field": "sysCreatedAt", "direction": "desc"}],
                filters=dbFilters,
            )
        result = getRecordsetPaginatedWithFkSort(
            db, AutoWorkflow,
            pagination=dbPagination,
            recordFilter=recordFilter if recordFilter else None,
        )
        pageItems = result.get("items", []) if isinstance(result, dict) else result.items
        # One grouped query for run stats covering the whole page.
        workflowIds = [w.get("id") for w in pageItems if w.get("id")]
        statsById = _batchRunStatsForWorkflowIds(db, workflowIds)
        items = []
        for w in pageItems:
            row = dict(w)
            wfId = row.get("id")
            st = statsById.get(str(wfId)) if wfId else None
            activeRunId = st.get("activeRunId") if st else None
            row["isRunning"] = bool(activeRunId)
            row["activeRunId"] = activeRunId
            row["runCount"] = int(st.get("runCount") or 0) if st else 0
            row["lastStartedAt"] = float(st["lastStartedAt"]) if st and st.get("lastStartedAt") is not None else None
            # Permission flags: platform admin or mandate admin get full
            # control; everyone else read-only.
            wMandateId = row.get("mandateId")
            if context.isPlatformAdmin:
                row["canEdit"] = True
                row["canDelete"] = True
                row["canExecute"] = True
            elif wMandateId and wMandateId in adminMandateIds:
                row["canEdit"] = True
                row["canDelete"] = True
                row["canExecute"] = True
            else:
                row["canEdit"] = False
                row["canDelete"] = False
                row["canExecute"] = False
            # Strip the heavy graph payload from listing responses.
            row.pop("graph", None)
            items.append(row)
        enrichRowsWithFkLabels(
            items,
            labelResolvers={
                "mandateId": resolveMandateLabels,
                "featureInstanceId": _resolveInstanceLabelsWithFeatureCode,
            },
        )
        for row in items:
            row["instanceLabel"] = row.pop("featureInstanceIdLabel", None)
            row["mandateLabel"] = row.pop("mandateIdLabel", None)
            row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId"))
        if hasComputedFilter or hasComputedSort:
            # Re-apply the computed-field filters/sort in Python and paginate
            # the enriched set down to the requested page.
            computedFilters = {
                k: v for k, v in (paginationParams.filters or {}).items()
                if k in _COMPUTED_FIELDS
            }
            computedSort = [
                s for s in (paginationParams.sort or [])
                if (s.field if hasattr(s, "field") else s.get("field", "")) in _COMPUTED_FIELDS
            ]
            computedPagination = PaginationParams.model_construct(
                page=paginationParams.page,
                pageSize=paginationParams.pageSize,
                sort=computedSort or [],
                filters=computedFilters or None,
            )
            filtered = applyFiltersAndSort(items, computedPagination)
            totalItems = filtered.get("totalItems", len(items))
            totalPages = filtered.get("totalPages", 1)
            items = filtered.get("items", items)
        else:
            totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
            totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages
    else:
        # Strategy B: single joined SQL query (stats computed in-database).
        result = _getWorkflowsJoinedPaginated(
            db, recordFilter if recordFilter else {}, paginationParams,
        )
        pageItems = result.get("items", [])
        totalItems = result.get("totalItems", 0)
        totalPages = result.get("totalPages", 0)
        items = []
        for row in pageItems:
            wMandateId = row.get("mandateId")
            wfId = row.get("id")
            activeRunId = row.get("activeRunId")
            if row.get("runCount") is not None:
                row["runCount"] = int(row["runCount"])
            row["isRunning"] = bool(activeRunId)
            # Same permission rules as the FK-sort branch above.
            if context.isPlatformAdmin:
                row["canEdit"] = True
                row["canDelete"] = True
                row["canExecute"] = True
            elif wMandateId and wMandateId in adminMandateIds:
                row["canEdit"] = True
                row["canDelete"] = True
                row["canExecute"] = True
            else:
                row["canEdit"] = False
                row["canDelete"] = False
                row["canExecute"] = False
            row.pop("graph", None)
            items.append(row)
        enrichRowsWithFkLabels(
            items,
            labelResolvers={
                "mandateId": resolveMandateLabels,
                "featureInstanceId": _resolveInstanceLabelsWithFeatureCode,
            },
        )
        for row in items:
            row["instanceLabel"] = row.pop("featureInstanceIdLabel", None)
            row["mandateLabel"] = row.pop("mandateIdLabel", None)
            row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId"))

    return {
        "items": items,
        "pagination": {
            "currentPage": paginationParams.page,
            "pageSize": paginationParams.pageSize,
            "totalItems": totalItems,
            "totalPages": totalPages,
        },
    }
|
|
|
|
|
|
@router.delete("/workflows/{workflowId}")
@limiter.limit("30/minute")
def delete_system_workflow(
    request: Request,
    workflowId: str = Path(..., description="AutoWorkflow ID"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """
    Delete a workflow by ID without requiring featureInstanceId (orphan / broken FK rows).

    RBAC matches get_system_workflows: SysAdmin or Mandate-Admin for the workflow's mandate.
    Cascades versions, runs, step logs, tasks — same as mandate cascade delete.

    Parameters:
        request: Raw request; required by the slowapi rate limiter decorator.
        workflowId: Primary key of the AutoWorkflow row to delete.
        context: Authenticated request context (user + role information).

    Returns:
        ``{"success": True, "id": workflowId}`` on full success.

    Raises:
        HTTPException 404: table missing or no workflow with this ID.
        HTTPException 400: the target is a template workflow (protected here).
        HTTPException 403: caller lacks delete rights for the workflow's mandate.
        HTTPException 500: cascade delete failed, or the post-delete callback
            failed after the cascade already committed.
    """
    db = _getDb()
    # Treat a missing table the same as a missing row — nothing to delete.
    if not db._ensureTableExists(AutoWorkflow):
        raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))

    rows = db.getRecordset(AutoWorkflow, recordFilter={"id": workflowId})
    if not rows:
        raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))

    wf = dict(rows[0]) if rows else {}
    # Templates are managed elsewhere; this endpoint only removes concrete workflows.
    if wf.get("isTemplate"):
        raise HTTPException(status_code=400, detail=routeApiMsg("Cannot delete a template workflow here"))

    wf_mandate_id = wf.get("mandateId")
    # RBAC gate: helper decides based on context + the workflow's mandate.
    if not _userMayDeleteWorkflow(context, wf_mandate_id):
        raise HTTPException(status_code=403, detail=routeApiMsg("Not allowed to delete this workflow"))

    try:
        _cascadeDeleteAutoWorkflow(db, workflowId)
    except Exception as e:
        logger.error(f"delete_system_workflow cascade failed: {e}")
        raise HTTPException(status_code=500, detail=routeApiMsg(str(e)))

    # Callback registry: log + propagate so listener bugs are visible.
    # Cascade is already committed at this point — a failure here is a side-effect
    # bug (stale caches, missed notifications), never an "ignore silently" event.
    try:
        from modules.shared.callbackRegistry import callbackRegistry
        callbackRegistry.trigger("graphicalEditor.workflow.changed")
    except Exception as e:
        logger.error(
            f"delete_system_workflow: callbackRegistry.trigger failed for "
            f"workflowId={workflowId}: {e}"
        )
        # Deliberate: surface as 500 even though the row is gone, so the
        # listener defect gets noticed instead of being swallowed.
        raise HTTPException(
            status_code=500,
            detail=routeApiMsg(f"Workflow deleted but post-delete callback failed: {e}"),
        )

    return {"success": True, "id": workflowId}
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Filter-values endpoints (for FormGeneratorTable column filters)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
# Synthetic (join-computed) columns treated as timestamps: they have no entry
# in the model's ``model_fields`` metadata, so _isTimestampColumn() recognizes
# them via this explicit set. Timestamp columns get a PeriodPicker on the
# frontend, so no discrete filter values are enumerated for them.
_SYNTHETIC_TIMESTAMP_FIELDS = {"lastStartedAt"}
|
|
|
|
|
|
def _isTimestampColumn(modelClass, column: str) -> bool:
    """Return True when *column* is a timestamp field.

    Timestamp columns use a PeriodPicker (range selector) on the frontend,
    so no discrete filter values are enumerated for them. Two sources are
    checked: the synthetic-field set (columns computed in joins, absent from
    the model) and the ``frontend_type`` marker in the Pydantic field's
    ``json_schema_extra``.
    """
    if column in _SYNTHETIC_TIMESTAMP_FIELDS:
        return True
    fieldInfo = getattr(modelClass, "model_fields", {}).get(column)
    if not fieldInfo:
        return False
    schemaExtra = getattr(fieldInfo, "json_schema_extra", None)
    return isinstance(schemaExtra, dict) and schemaExtra.get("frontend_type") == "timestamp"
|
|
|
|
|
|
def _enrichedFilterValues(
    db, context: RequestContext, modelClass, scopeFilter, column: str,
):
    """Return distinct filter values for FormGeneratorTable column filters.

    For FK columns (mandateId, featureInstanceId) returns ``{value, label}``
    objects so the frontend can display human-readable labels in the dropdown
    without a separate source fk fetch. Non-FK columns return ``string | null``.

    Timestamp columns (sysCreatedAt, lastStartedAt) return an empty list because
    the frontend uses a PeriodPicker (range selector) — no discrete values needed.

    ``null`` is included when rows with NULL/empty values exist (enables the
    "(Leer)" filter option).

    Returns JSONResponse to bypass FastAPI response_model validation.

    Parameters:
        db: DatabaseConnector for the graphical-editor database.
        context: Authenticated request context, passed to ``scopeFilter``.
        modelClass: Pydantic model whose table is queried (AutoWorkflow or a
            run model — NOTE(review): non-AutoWorkflow callers are assumed to
            have ``workflowId``/``label`` columns; confirm against callers).
        scopeFilter: Callable(context) -> base RBAC record filter dict or None.
        column: Frontend column name (may be a label alias like "mandateLabel").
    """
    from fastapi.responses import JSONResponse
    from modules.routes.routeHelpers import resolveMandateLabels, resolveInstanceLabels

    # Timestamp columns never enumerate values — PeriodPicker handles ranges.
    if _isTimestampColumn(modelClass, column):
        return JSONResponse(content=[])

    # --- FK column: mandate -------------------------------------------------
    if column in ("mandateLabel", "mandateId"):
        baseFilter = scopeFilter(context)
        recordFilter = dict(baseFilter) if baseFilter else {}
        # Workflow listings exclude templates everywhere in this module.
        if modelClass == AutoWorkflow:
            recordFilter["isTemplate"] = False
        items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["mandateId"]) or []
        allVals = {r.get("mandateId") for r in items}
        mandateIds = sorted(v for v in allVals if v)
        # None/"" rows drive the "(Leer)" (empty) filter option.
        hasEmpty = None in allVals or "" in allVals
        labelMap = resolveMandateLabels(mandateIds) if mandateIds else {}
        # Fallback label "NA(<id>)" keeps unresolvable FKs visible in the UI.
        result = [{"value": mid, "label": labelMap.get(mid) or f"NA({mid})"} for mid in mandateIds]
        if hasEmpty:
            result.append(None)
        return JSONResponse(content=result)

    # --- FK column: feature instance ---------------------------------------
    if column in ("instanceLabel", "featureInstanceId"):
        baseFilter = scopeFilter(context)
        recordFilter = dict(baseFilter) if baseFilter else {}
        if modelClass == AutoWorkflow:
            # Workflows carry the FK directly.
            recordFilter["isTemplate"] = False
            items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["featureInstanceId"]) or []
            allVals = {r.get("featureInstanceId") for r in items}
            instanceIds = sorted(v for v in allVals if v)
            hasEmpty = None in allVals or "" in allVals
        else:
            # Run-style models only reference workflowId; hop through
            # AutoWorkflow to reach the feature instance FK.
            items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId"]) or []
            wfIds = list({r.get("workflowId") for r in items if r.get("workflowId")})
            instanceIds = []
            hasEmpty = False
            if wfIds and db._ensureTableExists(AutoWorkflow):
                wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds}, fieldFilter=["featureInstanceId"]) or []
                allVals = {w.get("featureInstanceId") for w in wfs}
                instanceIds = sorted(v for v in allVals if v)
                hasEmpty = None in allVals or "" in allVals
        labelMap = resolveInstanceLabels(instanceIds) if instanceIds else {}
        result = [{"value": iid, "label": labelMap.get(iid) or f"NA({iid})"} for iid in instanceIds]
        if hasEmpty:
            result.append(None)
        return JSONResponse(content=result)

    # --- Derived column: workflow label ------------------------------------
    if column == "workflowLabel":
        baseFilter = scopeFilter(context)
        recordFilter = dict(baseFilter) if baseFilter else {}
        items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId", "label"]) or []
        labels = set()
        wfIds = set()
        hasEmpty = False
        for r in items:
            # Rows may carry a denormalized label; otherwise resolve via FK below.
            if r.get("label"):
                labels.add(r["label"])
            elif not r.get("workflowId"):
                # No label AND no workflow reference -> counts as empty.
                hasEmpty = True
            if r.get("workflowId"):
                wfIds.add(r["workflowId"])
        if wfIds and db._ensureTableExists(AutoWorkflow):
            wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": list(wfIds)}, fieldFilter=["label"]) or []
            for wf in wfs:
                if wf.get("label"):
                    labels.add(wf["label"])
        # Case-insensitive sort for a stable, human-friendly dropdown order.
        result = sorted(labels, key=lambda v: v.lower())
        if hasEmpty:
            result.append(None)
        return JSONResponse(content=result)

    # --- Plain column: delegate to a DISTINCT query -------------------------
    baseFilter = scopeFilter(context)
    recordFilter = dict(baseFilter) if baseFilter else {}
    if modelClass == AutoWorkflow:
        recordFilter["isTemplate"] = False
    return JSONResponse(content=db.getDistinctColumnValues(modelClass, column, recordFilter=recordFilter or None) or [])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Run-specific endpoints (path-param routes MUST come after static routes)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@router.get("/{runId}/steps")
@limiter.limit("60/minute")
def get_run_steps(
    request: Request,
    runId: str = Path(..., description="Run ID"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Return the step logs of one run, sorted by start time.

    Access rule mirrors the other run endpoints: platform admins always pass;
    otherwise the caller must own the run or be a mandate admin of the run's
    mandate. Missing tables degrade gracefully (404 for the run table, empty
    step list for the step-log table).
    """
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))

    matches = db.getRecordset(AutoRun, recordFilter={"id": runId})
    if not matches:
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
    runRecord = dict(matches[0])

    if not context.isPlatformAdmin:
        callerId = str(context.user.id) if context.user else None
        mandate = runRecord.get("mandateId")
        # Owner check first; mandate-admin lookup only runs when needed.
        allowed = runRecord.get("ownerId") == callerId or (
            bool(mandate) and bool(callerId) and _isUserMandateAdmin(callerId, mandate)
        )
        if not allowed:
            raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))

    if not db._ensureTableExists(AutoStepLog):
        return {"steps": []}

    rows = db.getRecordset(AutoStepLog, recordFilter={"runId": runId})
    stepList = [dict(r) for r in rows] if rows else []
    # Missing startedAt sorts first (treated as 0).
    stepList.sort(key=lambda entry: entry.get("startedAt") or 0)
    return {"steps": stepList}
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# SSE stream for live run tracing (system-level, no instanceId required)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@router.get("/{runId}/stream")
async def get_run_stream(
    request: Request,
    runId: str = Path(..., description="Run ID"),
    context: RequestContext = Depends(getRequestContext),
):
    """SSE stream for live step-log updates during a workflow run (system-level).

    Access rule matches get_run_steps: platform admin, run owner, or mandate
    admin of the run's mandate. The response is a text/event-stream whose
    events are produced by the execution engine through a per-run queue
    (queue id ``run-trace-<runId>``).

    Raises:
        HTTPException 404: run table missing or run not found.
        HTTPException 403: caller has no access to this run.
    """
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))

    runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
    if not runs:
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
    run = dict(runs[0])

    # RBAC: owner or mandate admin (platform admin bypasses entirely).
    if not context.isPlatformAdmin:
        userId = str(context.user.id) if context.user else None
        runOwner = run.get("ownerId")
        runMandate = run.get("mandateId")
        if runOwner == userId:
            pass
        elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
            pass
        else:
            raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))

    # Local import keeps the streaming service out of module import time.
    from modules.serviceCenter.core.serviceStreaming.eventManager import get_event_manager
    sseEventManager = get_event_manager()
    queueId = f"run-trace-{runId}"
    # NOTE(review): assumes create_queue is idempotent for an existing id — confirm.
    sseEventManager.create_queue(queueId)

    async def _sseGenerator():
        # Drains the per-run queue and emits SSE frames until the run ends.
        queue = sseEventManager.get_queue(queueId)
        if not queue:
            return
        while True:
            try:
                # 30s without an event -> emit a keepalive so proxies/clients
                # don't drop the idle connection.
                event = await asyncio.wait_for(queue.get(), timeout=30)
            except asyncio.TimeoutError:
                yield "data: {\"type\": \"keepalive\"}\n\n"
                continue
            # None is the producer's explicit end-of-stream sentinel.
            if event is None:
                break
            # Events may be wrapped as {"data": ...}; unwrap when present.
            payload = event.get("data", event) if isinstance(event, dict) else event
            yield f"data: {json.dumps(payload, default=str)}\n\n"
            # Terminal event types close the stream after being forwarded.
            eventType = payload.get("type", "") if isinstance(payload, dict) else ""
            if eventType in ("run_complete", "run_failed"):
                break
        # Delayed cleanup gives late events/readers a grace period.
        await sseEventManager.cleanup(queueId, delay=10)

    return StreamingResponse(
        _sseGenerator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disable nginx response buffering so events flush immediately.
            "X-Accel-Buffering": "no",
        },
    )
|
|
|
|
|
|
@router.post("/{runId}/stop")
@limiter.limit("30/minute")
def stop_workflow_run(
    request: Request,
    runId: str = Path(..., description="Run ID"),
    context: RequestContext = Depends(getRequestContext),
):
    """Stop a running workflow execution (system-level).

    Signals the in-memory execution engine first; when the engine has no live
    handle on the run, the DB record is reconciled directly (status set to
    "stopped", completedAt backfilled if absent). Access rule matches the
    other run endpoints: platform admin, run owner, or mandate admin.
    """
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))

    found = db.getRecordset(AutoRun, recordFilter={"id": runId})
    if not found:
        raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
    runRecord = dict(found[0])

    if not context.isPlatformAdmin:
        callerId = str(context.user.id) if context.user else None
        mandate = runRecord.get("mandateId")
        # Owner check first; mandate-admin lookup only when the owner check fails.
        mayStop = runRecord.get("ownerId") == callerId or (
            bool(mandate) and bool(callerId) and _isUserMandateAdmin(callerId, mandate)
        )
        if not mayStop:
            raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))

    from modules.workflows.automation2.executionEngine import requestRunStop

    # Happy path: the engine holds the run and will honor the stop flag.
    if requestRunStop(runId):
        return {"status": "stopping", "runId": runId, "message": "Stop signal sent"}

    # Engine has no live handle — reconcile the persisted record instead.
    currentStatus = runRecord.get("status", "")
    if currentStatus in ("completed", "failed", "stopped"):
        return {"status": currentStatus, "runId": runId, "message": "Run already finished"}

    reconcile = {"status": "stopped"}
    if not runRecord.get("completedAt"):
        reconcile["completedAt"] = time.time()
    db.recordModify(AutoRun, runId, reconcile)
    return {"status": "stopped", "runId": runId, "message": "Run not active in memory, marked as stopped"}
|