From b6be8f391e363042882923524563448ff030f2e6 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Thu, 23 Apr 2026 23:09:38 +0200
Subject: [PATCH 1/7] Surface demo credentials, self-heal Redmine isClosed, run accounting sync as background job, add i18n iso-choices, PWG demo tests
---
modules/demoConfigs/_baseDemoConfig.py | 15 +-
modules/demoConfigs/investorDemo2026.py | 73 +++-
modules/demoConfigs/pwgDemo2026.py | 72 +++-
.../features/redmine/serviceRedmineSync.py | 93 +++++
.../features/trustee/routeFeatureTrustee.py | 128 +++++-
modules/routes/routeI18n.py | 37 ++
tests/demo/test_demo_bootstrap.py | 9 +-
tests/demo/test_demo_uc3_chatbot.py | 14 +-
tests/demo/test_pwg_demo_bootstrap.py | 226 +++++++++++
tests/integration/rbac/test_rbac_database.py | 2 +-
tests/test_phase123_basic.py | 314 --------------
tests/test_service_redmine_stats.py | 2 +
.../serviceAgent/test_workflow_tools_crud.py | 383 ++++++++++++++++++
.../services/test_json_extraction_merging.py | 66 +--
.../workflows/test_automation2_graphUtils.py | 7 +-
15 files changed, 1052 insertions(+), 389 deletions(-)
create mode 100644 tests/demo/test_pwg_demo_bootstrap.py
delete mode 100644 tests/test_phase123_basic.py
create mode 100644 tests/unit/serviceAgent/test_workflow_tools_crud.py
diff --git a/modules/demoConfigs/_baseDemoConfig.py b/modules/demoConfigs/_baseDemoConfig.py
index 4d9bdd59..d20d4315 100644
--- a/modules/demoConfigs/_baseDemoConfig.py
+++ b/modules/demoConfigs/_baseDemoConfig.py
@@ -4,11 +4,16 @@ Base class for demo configurations.
Each demo config file in this folder extends _BaseDemoConfig and provides
idempotent load() and remove() methods for setting up / tearing down
a complete demo environment (mandates, users, features, test data, etc.).
+
+Subclasses SHOULD also declare ``credentials`` so the SysAdmin who triggers a
+demo-load gets the initial username / password pair shown in the UI -- this
+avoids the "where do I find the password?" anti-pattern of having to grep the
+source code.
"""
import logging
from abc import ABC, abstractmethod
-from typing import Dict, Any
+from typing import Any, Dict, List
logger = logging.getLogger(__name__)
@@ -20,6 +25,13 @@ class _BaseDemoConfig(ABC):
label: str = ""
description: str = ""
+ # Each entry describes one bootstrapped login that the demo creates.
+ # Shape: {"role": "Demo-Sachbearbeiter", "username": "pwg.demo",
+ # "email": "pwg.demo@poweron.swiss", "password": "pwg.demo.2026"}
+ # Surfaced via GET /api/admin/demo-config and inside the load() summary
+ # so the AdminDemoConfigPage can display it (no source-code grep needed).
+ credentials: List[Dict[str, str]] = []
+
@abstractmethod
def load(self, db) -> Dict[str, Any]:
"""Create all demo data (idempotent). Returns summary dict."""
@@ -35,4 +47,5 @@ class _BaseDemoConfig(ABC):
"code": self.code,
"label": self.label,
"description": self.description,
+ "credentials": list(self.credentials or []),
}
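For orientation, a minimal subclass sketch. ``FooDemo2026`` and all of its
values are illustrative; the ``load()`` summary keys ("created", "errors",
"credentials") mirror the two real configs in this patch, while the
``remove()`` shape is an assumption::

    from typing import Any, Dict

    from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig


    class FooDemo2026(_BaseDemoConfig):
        code = "foo-demo-2026"
        label = "Foo Demo 2026"
        description = "Hypothetical minimal demo."
        credentials = [
            {
                "role": "Demo User",
                "username": "foo.demo",
                "email": "foo.demo@example.com",
                "password": "foo.demo.2026",
            }
        ]

        def load(self, db) -> Dict[str, Any]:
            summary: Dict[str, Any] = {"created": [], "errors": []}
            # ... idempotently create mandate, user, feature instances ...
            summary["credentials"] = list(self.credentials)  # surfaced to the UI
            return summary

        def remove(self, db) -> Dict[str, Any]:
            # ... tear down everything load() created (shape assumed) ...
            return {"removed": [], "errors": []}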
diff --git a/modules/demoConfigs/investorDemo2026.py b/modules/demoConfigs/investorDemo2026.py
index 058f9001..81956c6d 100644
--- a/modules/demoConfigs/investorDemo2026.py
+++ b/modules/demoConfigs/investorDemo2026.py
@@ -64,6 +64,14 @@ class InvestorDemo2026(_BaseDemoConfig):
"Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, "
"trustee with RMA, workspace, graph editor, and neutralization."
)
+ credentials = [
+ {
+ "role": "SysAdmin Demo",
+ "username": _USER["username"],
+ "email": _USER["email"],
+ "password": _USER["password"],
+ }
+ ]
# ------------------------------------------------------------------
# load
@@ -101,6 +109,10 @@ class InvestorDemo2026(_BaseDemoConfig):
logger.error(f"Demo load failed: {e}", exc_info=True)
summary["errors"].append(str(e))
+ # Surface initial credentials so the SysAdmin doesn't have to grep the
+ # source code -- consumed by AdminDemoConfigPage to render a copyable
+ # login box in the result banner.
+ summary["credentials"] = list(self.credentials)
return summary
# ------------------------------------------------------------------
@@ -268,10 +280,17 @@ class InvestorDemo2026(_BaseDemoConfig):
logger.error(f"Failed to create feature '{instanceLabel}' ({code}) in {mandateLabel}: {e}")
def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
- """Grant the demo user admin access to every feature instance in the mandate."""
+    """Grant the demo user admin access to EVERY feature instance in the
+    mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role
+    the user does not see any feature tile in the UI, so this method also
+    heals a half-broken state by re-copying the per-feature template
+ roles if they are missing (e.g. when the instance was created via an
+ older code path that skipped ``copyTemplateRoles``).
+ """
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
from modules.datamodels.datamodelRbac import Role
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []
@@ -297,16 +316,50 @@ class InvestorDemo2026(_BaseDemoConfig):
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
- if adminRoles:
- adminRoleId = adminRoles[0].get("id")
- existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
- "featureAccessId": featureAccessId,
- "roleId": adminRoleId,
+
+ # Self-heal: if the per-feature admin role does not exist on this
+ # instance the template roles were never copied -- copy them now.
+ if not adminRoles:
+ logger.warning(
+ "Feature instance %s (%s) is missing role '%s' -- "
+ "re-copying template roles", instId, featureCode, adminRoleLabel,
+ )
+ try:
+ fi = getFeatureInterface(db)
+ fi._copyTemplateRoles(featureCode, mandateId, instId)
+ summary["created"].append(
+ f"Repaired template roles for {featureCode} in {mandateLabel}"
+ )
+ except Exception as repairErr:
+ summary["errors"].append(
+ f"Could not repair template roles for {featureCode} "
+ f"in {mandateLabel}: {repairErr}"
+ )
+ adminRoles = db.getRecordset(Role, recordFilter={
+ "featureInstanceId": instId,
+ "roleLabel": adminRoleLabel,
})
- if not existingRole:
- far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
- db.recordCreate(FeatureAccessRole, far)
- logger.info(f"Assigned {adminRoleLabel} role in {mandateLabel}")
+
+ if not adminRoles:
+ summary["errors"].append(
+                    f"Admin role '{adminRoleLabel}' not found for feature "
+                    f"instance {featureCode} in {mandateLabel} -- demo user "
+                    "will not see this feature."
+ )
+ continue
+
+ adminRoleId = adminRoles[0].get("id")
+ existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
+ "featureAccessId": featureAccessId,
+ "roleId": adminRoleId,
+ })
+ if not existingRole:
+ far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
+ db.recordCreate(FeatureAccessRole, far)
+ summary["created"].append(
+ f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}"
+ )
+ logger.info(f"Assigned {adminRoleLabel} role in {mandateLabel}")
def _ensureTrusteeRmaConfig(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
if not mandateId:
diff --git a/modules/demoConfigs/pwgDemo2026.py b/modules/demoConfigs/pwgDemo2026.py
index e3aeea51..d4661bcf 100644
--- a/modules/demoConfigs/pwgDemo2026.py
+++ b/modules/demoConfigs/pwgDemo2026.py
@@ -67,6 +67,14 @@ class PwgDemo2026(_BaseDemoConfig):
"Graph-Editor mit dem Pilot-Workflow für Jahresmietzinsbestätigungen "
"(als File importiert, active=false). Idempotent."
)
+ credentials = [
+ {
+ "role": "Demo-Sachbearbeiter",
+ "username": _USER["username"],
+ "email": _USER["email"],
+ "password": _USER["password"],
+ }
+ ]
# ------------------------------------------------------------------
# load
@@ -98,6 +106,10 @@ class PwgDemo2026(_BaseDemoConfig):
logger.error(f"PWG demo load failed: {e}", exc_info=True)
summary["errors"].append(str(e))
+ # Surface initial credentials so the SysAdmin doesn't have to grep the
+ # source code -- consumed by AdminDemoConfigPage to render a copyable
+ # login box in the result banner.
+ summary["credentials"] = list(self.credentials)
return summary
# ------------------------------------------------------------------
@@ -253,9 +265,17 @@ class PwgDemo2026(_BaseDemoConfig):
summary["errors"].append(f"Feature '{instanceLabel}' in {mandateLabel}: {e}")
def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
+    """Grant the demo user admin access to EVERY feature instance in the
+    mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role
+    the user does not see any feature tile in the UI, so this method also
+    heals a half-broken state by re-copying the per-feature template
+ roles if they are missing (e.g. when the instance was created via an
+ older code path that skipped ``copyTemplateRoles``).
+ """
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
from modules.datamodels.datamodelRbac import Role
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []
@@ -280,15 +300,51 @@ class PwgDemo2026(_BaseDemoConfig):
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
- if adminRoles:
- adminRoleId = adminRoles[0].get("id")
- existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
- "featureAccessId": featureAccessId,
- "roleId": adminRoleId,
+
+ # Self-heal: if the per-feature admin role does not exist on this
+ # instance the template roles were never copied -- copy them now.
+ if not adminRoles:
+ logger.warning(
+ "Feature instance %s (%s) is missing role '%s' -- "
+ "re-copying template roles", instId, featureCode, adminRoleLabel,
+ )
+ try:
+ fi = getFeatureInterface(db)
+ fi._copyTemplateRoles(featureCode, mandateId, instId)
+ summary["created"].append(
+ f"Repaired template roles for {featureCode} in {mandateLabel}"
+ )
+ except Exception as repairErr:
+ summary["errors"].append(
+ f"Could not repair template roles for {featureCode} "
+ f"in {mandateLabel}: {repairErr}"
+ )
+ adminRoles = db.getRecordset(Role, recordFilter={
+ "featureInstanceId": instId,
+ "roleLabel": adminRoleLabel,
})
- if not existingRole:
- far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
- db.recordCreate(FeatureAccessRole, far)
+
+ if not adminRoles:
+ # Hard fail surfaced to UI -- without the admin role the user
+ # would silently not see the instance.
+ summary["errors"].append(
+                f"Admin role '{adminRoleLabel}' not found for feature "
+                f"instance {featureCode} in {mandateLabel} -- demo user "
+                "will not see this feature."
+ )
+ continue
+
+ adminRoleId = adminRoles[0].get("id")
+ existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
+ "featureAccessId": featureAccessId,
+ "roleId": adminRoleId,
+ })
+ if not existingRole:
+ far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
+ db.recordCreate(FeatureAccessRole, far)
+ summary["created"].append(
+ f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}"
+ )
def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], userId: Optional[str], summary: Dict):
if not mandateId or not userId:
diff --git a/modules/features/redmine/serviceRedmineSync.py b/modules/features/redmine/serviceRedmineSync.py
index 6d086ac0..2c631630 100644
--- a/modules/features/redmine/serviceRedmineSync.py
+++ b/modules/features/redmine/serviceRedmineSync.py
@@ -79,6 +79,16 @@ async def runSync(
async with _lockFor(featureInstanceId):
started = time.monotonic()
+
+ # CRITICAL: ensure the schema cache (especially the per-status
+ # ``isClosed`` map) is populated BEFORE we iterate issues. Redmine's
+ # /issues.json endpoint only returns ``{id, name}`` for the status
+ # object -- the closed/open flag lives in /issue_statuses.json. If
+ # the cache is empty here, every freshly-synced ticket would land
+ # with ``isClosed=False`` and the Stats page would be useless.
+ await _ensureSchemaWarm(currentUser, mandateId, featureInstanceId)
+ cfg = iface.getConfig(featureInstanceId) # re-read to get warm cache
+
full = force or cfg.lastSyncAt is None
updated_from_iso: Optional[str] = None
if not full and cfg.lastSyncAt is not None:
@@ -107,6 +117,15 @@ async def runSync(
tickets_upserted += _upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch)
relations_upserted += _replaceRelations(iface, featureInstanceId, issue, now_epoch)
+ # Self-healing pass: re-apply ``isClosed`` to every mirrored ticket
+ # using the now-warm schema cache. Fixes pre-existing rows that were
+ # synced before the cache was populated (cheap; mirror-local only).
+ flags_fixed = _rebuildIsClosedFromSchema(iface, featureInstanceId, now_epoch)
+ if flags_fixed:
+ logger.info(
+ f"runSync({featureInstanceId}): corrected isClosed on {flags_fixed} mirror rows"
+ )
+
duration_ms = int((time.monotonic() - started) * 1000)
iface.recordSyncSuccess(
featureInstanceId,
@@ -240,6 +259,80 @@ def _replaceRelations(
return inserted
+# ---------------------------------------------------------------------------
+# Schema cache warm-up + post-sync isClosed correction
+# ---------------------------------------------------------------------------
+
+async def _ensureSchemaWarm(
+ currentUser: User,
+ mandateId: Optional[str],
+ featureInstanceId: str,
+) -> None:
+ """Make sure ``cfg.schemaCache['statuses']`` exists with the per-status
+ ``isClosed`` flag. Called at the start of every sync because Redmine's
+ ``/issues.json`` doesn't expose ``is_closed`` on the inline status
+ object, so we MUST resolve it via the schema.
+ """
+ iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
+ cfg = iface.getConfig(featureInstanceId)
+ if cfg is None:
+ return
+ statuses = (cfg.schemaCache or {}).get("statuses") or []
+ if statuses:
+ return
+ # Lazy import to avoid a circular dependency at module load.
+ from modules.features.redmine.serviceRedmine import getProjectMeta
+ try:
+ await getProjectMeta(currentUser, mandateId, featureInstanceId, forceRefresh=True)
+ except Exception as e:
+ logger.warning(
+ f"_ensureSchemaWarm({featureInstanceId}): could not warm schema cache: {e} "
+ "-- isClosed flags may be inaccurate until next successful schema fetch."
+ )
+
+
+def _rebuildIsClosedFromSchema(iface, featureInstanceId: str, nowEpoch: float) -> int:
+ """Walk the mirror once and fix ``isClosed`` (and ``closedOnTs``) for any
+ ticket whose stored value disagrees with the current schema cache.
+
+    Returns the number of rows that were actually corrected. Returns 0
+    without touching any row when the schema cache has no statuses (a
+    failed warm-up is already logged by ``_ensureSchemaWarm``).
+ """
+ cfg = iface.getConfig(featureInstanceId)
+ if cfg is None:
+ return 0
+ statuses = (cfg.schemaCache or {}).get("statuses") or []
+ if not statuses:
+ return 0
+ closed_ids = {int(s.get("id")) for s in statuses if s.get("id") is not None and s.get("isClosed")}
+ rows = iface.listMirroredTickets(featureInstanceId)
+ corrections = 0
+ for row in rows:
+ sid = row.get("statusId")
+ if sid is None:
+ continue
+ should_be_closed = int(sid) in closed_ids
+ if bool(row.get("isClosed")) == should_be_closed:
+ continue
+ # Only the closed/open flag (and the derived closedOnTs) are
+ # touched here -- everything else came from Redmine and stays.
+ update = {
+ "isClosed": bool(should_be_closed),
+ "closedOnTs": float(row.get("updatedOnTs")) if (should_be_closed and row.get("updatedOnTs") is not None) else None,
+ "syncedAt": nowEpoch,
+ }
+ try:
+ iface.upsertMirroredTicket(featureInstanceId, int(row.get("redmineId")), {**row, **update})
+ corrections += 1
+ except Exception as e:
+ logger.warning(
+ f"_rebuildIsClosedFromSchema({featureInstanceId}): could not fix ticket "
+ f"#{row.get('redmineId')}: {e}"
+ )
+ return corrections
+
+
# ---------------------------------------------------------------------------
# Pure helpers
# ---------------------------------------------------------------------------
diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py
index 3a6bfab0..d040c37d 100644
--- a/modules/features/trustee/routeFeatureTrustee.py
+++ b/modules/features/trustee/routeFeatureTrustee.py
@@ -1562,38 +1562,84 @@ async def refresh_chart_of_accounts(
return {"message": f"Chart of accounts refreshed: {len(charts)} entries", "count": len(charts)}
-@router.post("/{instanceId}/accounting/sync")
-@limiter.limit("5/minute")
-async def sync_positions_to_accounting(
- request: Request,
- instanceId: str = Path(..., description="Feature Instance ID"),
- data: Dict[str, Any] = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Sync positions to the accounting system. Body: { positionIds: [...] }"""
- mandateId = _validateInstanceAccess(instanceId, context)
- interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
+TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE = "trusteeAccountingPush"
+
+
+async def _trusteeAccountingPushJobHandler(job: Dict[str, Any], progressCb) -> Dict[str, Any]:
+ """BackgroundJob handler: pushes a batch of positions to the external
+ accounting system. Runs in the worker without blocking the original HTTP
+ request, so the user can continue navigating while the sync runs.
+
+ Reads inputs from `job["payload"]` (`positionIds`) and reports incremental
+ progress via `progressCb(percent, message)`. The job result has the same
+ shape that the legacy synchronous endpoint used to return.
+ """
+ from modules.security.rootAccess import getRootUser
from .accounting.accountingBridge import AccountingBridge
+
+ instanceId = job["featureInstanceId"]
+ mandateId = job["mandateId"]
+ payload = job.get("payload") or {}
+ positionIds: List[str] = list(payload.get("positionIds") or [])
+ if not positionIds:
+ return {"total": 0, "success": 0, "skipped": 0, "errors": 0, "results": []}
+
+ rootUser = getRootUser()
+ interface = getInterface(rootUser, mandateId=mandateId, featureInstanceId=instanceId)
bridge = AccountingBridge(interface)
- positionIds = data.get("positionIds", [])
- if not positionIds:
- raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required"))
+ results = []
+ total = len(positionIds)
+ progressCb(2, f"Sync wird vorbereitet ({total} Position(en))...")
+
+ # Resolve connector + plain config once to avoid decryption rate-limits
+ # (mirrors the optimisation in pushBatchToAccounting). We push positions
+ # one-by-one inside the job so we can emit incremental progress and so
+ # one bad row never aborts the rest.
+ from .accounting.accountingBridge import SyncResult
+ try:
+ connector, plainConfig, configRecord = await bridge._resolveConnectorAndConfig(instanceId)
+ except Exception as resolveErr:
+ logger.exception("Accounting push: failed to resolve connector/config")
+ progressCb(100, "Verbindungsaufbau fehlgeschlagen.")
+ raise resolveErr
+
+ if not connector or not plainConfig:
+ results = [SyncResult(success=False, errorMessage="No active accounting configuration found") for _ in positionIds]
+ progressCb(100, "Keine aktive Buchhaltungs-Konfiguration gefunden.")
+ return {
+ "total": len(results),
+ "success": 0,
+ "skipped": 0,
+ "errors": len(results),
+ "results": [r.model_dump() for r in results],
+ }
+
+ for index, positionId in enumerate(positionIds, start=1):
+ result = await bridge.pushPositionToAccounting(
+ instanceId,
+ positionId,
+ _resolvedConnector=connector,
+ _resolvedPlainConfig=plainConfig,
+ _resolvedConfigRecord=configRecord,
+ )
+ results.append(result)
+ # Reserve 5..95% for the push loop, keep the tail for summary.
+ pct = 5 + int(90 * index / total)
+ progressCb(pct, f"Position {index}/{total} verarbeitet")
- results = await bridge.pushBatchToAccounting(instanceId, positionIds)
skipped = [r for r in results if not r.success and r.errorMessage and "already synced" in r.errorMessage]
failed = [r for r in results if not r.success and r not in skipped]
if skipped:
- logger.info(
- "Accounting sync: %s position(s) already synced, skipped",
- len(skipped),
- )
+ logger.info("Accounting sync: %s position(s) already synced, skipped", len(skipped))
if failed:
logger.warning(
"Accounting sync had %s failure(s): %s",
len(failed),
"; ".join(r.errorMessage or "unknown" for r in failed[:3]),
)
+
+ progressCb(100, "Sync abgeschlossen.")
return {
"total": len(results),
"success": sum(1 for r in results if r.success),
@@ -1603,6 +1649,50 @@ async def sync_positions_to_accounting(
}
+try:
+ from modules.serviceCenter.services.serviceBackgroundJobs import registerJobHandler as _registerPushJobHandler
+ _registerPushJobHandler(TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE, _trusteeAccountingPushJobHandler)
+except Exception as _pushRegErr:
+ logger.warning("Failed to register trusteeAccountingPush job handler: %s", _pushRegErr)
+
+
+@router.post("/{instanceId}/accounting/sync", status_code=status.HTTP_202_ACCEPTED)
+@limiter.limit("5/minute")
+async def sync_positions_to_accounting(
+ request: Request,
+ instanceId: str = Path(..., description="Feature Instance ID"),
+ data: Dict[str, Any] = Body(...),
+ context: RequestContext = Depends(getRequestContext)
+) -> Dict[str, Any]:
+ """Submit a background job that pushes positions to the accounting system.
+
+ Body: ``{ positionIds: [...] }``
+
+ Returns ``{ jobId, status: "pending" }`` immediately so the user is not
+ blocked while the (potentially long) external accounting calls run.
+ Clients poll ``GET /api/jobs/{jobId}`` until status is ``SUCCESS`` /
+ ``ERROR`` and then read the same ``{ total, success, skipped, errors,
+ results }`` payload from ``job.result`` that the legacy synchronous
+ endpoint returned.
+ """
+ from modules.serviceCenter.services.serviceBackgroundJobs import startJob
+
+ mandateId = _validateInstanceAccess(instanceId, context)
+
+ positionIds = data.get("positionIds", [])
+ if not positionIds:
+ raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required"))
+
+ jobId = await startJob(
+ TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE,
+ {"positionIds": list(positionIds)},
+ mandateId=mandateId,
+ featureInstanceId=instanceId,
+ triggeredBy=context.user.id if context.user else None,
+ )
+ return {"jobId": jobId, "status": "pending"}
+
+
@router.post("/{instanceId}/accounting/sync/{positionId}")
@limiter.limit("10/minute")
async def sync_single_position_to_accounting(
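A minimal client-side sketch of the new 202 contract, using ``requests`` for
brevity. The trustee route prefix is illustrative; only the
``/accounting/sync`` suffix, ``GET /api/jobs/{jobId}``, and the job-document
fields ``status`` / ``result`` are taken from the endpoint docstring above::

    import time

    import requests

    def push_positions(host: str, instance_id: str, position_ids: list, timeout_s: int = 300):
        # Submit: returns {"jobId": ..., "status": "pending"} immediately (HTTP 202).
        sync_url = f"{host}/api/features/trustee/{instance_id}/accounting/sync"
        r = requests.post(sync_url, json={"positionIds": position_ids})
        r.raise_for_status()
        job_id = r.json()["jobId"]
        # Poll the job endpoint until the worker reports a terminal state.
        deadline = time.monotonic() + timeout_s
        while time.monotonic() < deadline:
            job = requests.get(f"{host}/api/jobs/{job_id}").json()
            if job["status"] in ("SUCCESS", "ERROR"):
                # Same {total, success, skipped, errors, results} shape as the
                # legacy synchronous response.
                return job.get("result")
            time.sleep(2)
        raise TimeoutError(f"Accounting push job {job_id} did not finish in time")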
diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py
index 91fbd9fe..cadf128e 100644
--- a/modules/routes/routeI18n.py
+++ b/modules/routes/routeI18n.py
@@ -98,6 +98,11 @@ _ISO_LABELS: Dict[str, str] = {
"ur": "اردو", "uz": "Oʻzbek", "yo": "Yorùbá", "zu": "isiZulu",
}
+# Priority order for the language picker: most relevant first, rest sorted by label.
+# Single source of truth -- frontend fetches via GET /api/i18n/iso-choices and must
+# never duplicate this list.
+_ISO_PRIORITY_CODES: List[str] = ["de", "gsw", "en", "fr", "it"]
+
# ---------------------------------------------------------------------------
# DB helpers
@@ -554,6 +559,38 @@ async def list_language_codes():
return sorted(out, key=lambda x: (not x.get("isDefault"), x["code"]))
+@router.get("/iso-choices")
+async def list_iso_choices():
+ """Return the catalog of supported ISO 639-1/-3 language codes plus their
+ native labels. Single source of truth for any UI that lets the user pick a
+ language code (e.g. SysAdmin "add language set" dropdown). The frontend
+ must NOT keep its own copy of this list.
+
+ Response:
+ {
+ "priorityCodes": ["de", "gsw", "en", "fr", "it"],
+ "choices": [{"value": "de", "label": "de — Deutsch"}, ...]
+ }
+ """
+ choices = [
+ {"value": code, "label": f"{code} — {label}"}
+ for code, label in _ISO_LABELS.items()
+ ]
+
+ def _sortKey(item):
+ try:
+ prio = _ISO_PRIORITY_CODES.index(item["value"])
+ return (0, prio)
+ except ValueError:
+ return (1, item["label"].lower())
+
+ choices.sort(key=_sortKey)
+ return {
+ "priorityCodes": list(_ISO_PRIORITY_CODES),
+ "choices": choices,
+ }
+
+
@router.get("/sets/{code}")
async def get_language_set(code: str):
db = _publicMgmtDb()
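The two-tier ordering in ``_sortKey`` puts the priority codes first, in list
order, and everything else alphabetically by lower-cased label. A standalone
illustration with a three-entry catalog (labels follow the ``{code} — {label}``
format built above; ``en — English`` is an assumed catalog entry)::

    priority = ["de", "gsw", "en", "fr", "it"]
    choices = [
        {"value": "uz", "label": "uz — Oʻzbek"},
        {"value": "en", "label": "en — English"},
        {"value": "de", "label": "de — Deutsch"},
    ]

    def sort_key(item):
        try:
            return (0, priority.index(item["value"]))
        except ValueError:
            return (1, item["label"].lower())

    choices.sort(key=sort_key)
    assert [c["value"] for c in choices] == ["de", "en", "uz"]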
diff --git a/tests/demo/test_demo_bootstrap.py b/tests/demo/test_demo_bootstrap.py
index 1d725442..09076e57 100644
--- a/tests/demo/test_demo_bootstrap.py
+++ b/tests/demo/test_demo_bootstrap.py
@@ -48,7 +48,7 @@ class TestDemoBootstrap:
memberships = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mid})
assert len(memberships) >= 1, f"User not member of mandate {mandate.get('label')}"
- @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"])
+ @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "neutralization"])
def test_happylifeFeaturesExist(self, db, mandateHappylife, featureCode):
mid = mandateHappylife.get("id")
instances = _getFeatureInstances(db, mid, featureCode)
@@ -66,6 +66,13 @@ class TestDemoBootstrap:
instances = _getFeatureInstances(db, mid, "chatbot")
assert len(instances) == 0, "Alpina Treuhand should not have chatbot"
+ def test_happylifeNoChatbot(self, db, mandateHappylife):
+ """HappyLife also should NOT have a chatbot instance — chatbot was
+ removed from the InvestorDemo on 2026-04-20 (see changelog)."""
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "chatbot")
+ assert len(instances) == 0, "HappyLife should no longer have chatbot (removed 2026-04-20)"
+
class TestDemoBootstrapRma:
diff --git a/tests/demo/test_demo_uc3_chatbot.py b/tests/demo/test_demo_uc3_chatbot.py
index 89c8d7ba..0248bd5d 100644
--- a/tests/demo/test_demo_uc3_chatbot.py
+++ b/tests/demo/test_demo_uc3_chatbot.py
@@ -1,9 +1,11 @@
"""
T-UC3: Knowledge Chatbot.
-Verifies that the chatbot feature instance exists in HappyLife AG
-and that knowledge-base documents are available for upload.
-Note: The actual RAG demo runs via workspace, not the chatbot's own index.
+The chatbot feature instance was removed from the InvestorDemo on
+2026-04-20 (see changelog) — neither HappyLife nor Alpina bootstrap a
+chatbot today; the actual RAG demo runs via workspace. We still verify
+the knowledge-base demo files are present and that the bootstrap does
+NOT (re)create chatbot instances in either mandate.
"""
import pytest
@@ -13,11 +15,11 @@ from tests.demo.conftest import _getFeatureInstances
class TestChatbotSetup:
- def test_chatbotInstanceHappylife(self, db, mandateHappylife):
- """HappyLife must have a chatbot instance."""
+ def test_chatbotNotInHappylife(self, db, mandateHappylife):
+ """HappyLife should NOT have a chatbot instance (removed 2026-04-20)."""
mid = mandateHappylife.get("id")
instances = _getFeatureInstances(db, mid, "chatbot")
- assert len(instances) >= 1, "No chatbot instance in HappyLife"
+ assert len(instances) == 0, "HappyLife should no longer bootstrap a chatbot instance"
def test_chatbotNotInAlpina(self, db, mandateAlpina):
"""Alpina should NOT have a chatbot instance."""
diff --git a/tests/demo/test_pwg_demo_bootstrap.py b/tests/demo/test_pwg_demo_bootstrap.py
new file mode 100644
index 00000000..0613cafa
--- /dev/null
+++ b/tests/demo/test_pwg_demo_bootstrap.py
@@ -0,0 +1,226 @@
+# Copyright (c) 2026 Patrick Motsch
+# All rights reserved.
+"""T6 — PWG-Pilot demo bootstrap & idempotency tests.
+
+Covers AC 11 + AC 12 of the PWG-Pilot plan:
+ - ``PwgDemo2026.load()`` is idempotent (twice → no errors).
+ - All expected objects exist after load (mandate, demo user,
+ 4 feature instances, trustee seed data, imported pilot workflow with
+ ``active=False``).
+ - ``remove()`` cleans up cleanly and a subsequent ``load()`` rebuilds
+ the demo without error (idempotency over the full lifecycle).
+
+Mirrors the structure of ``tests/demo/test_demo_bootstrap.py`` and reuses
+its session-scoped ``db`` fixture from ``tests/demo/conftest.py``.
+
+Marked ``expensive + live`` because they hit the real Postgres databases
+(``poweron_app``, ``poweron_trustee``, ``poweron_graphicaleditor``); run
+them explicitly with::
+
+ pytest -m "expensive or live" tests/demo/test_pwg_demo_bootstrap.py
+"""
+
+import pytest
+
+from modules.datamodels.datamodelFeatures import FeatureInstance
+from modules.datamodels.datamodelMembership import UserMandate
+from modules.datamodels.datamodelUam import Mandate, UserInDB
+
+from tests.demo.conftest import _getFeatureInstances
+
+
+pytestmark = [pytest.mark.expensive, pytest.mark.live]
+
+
+# ---------------------------------------------------------------------------
+# Fixtures (config is session-scoped; DB lookups are function-scoped and fresh)
+# ---------------------------------------------------------------------------
+
+@pytest.fixture(scope="session")
+def pwgDemoConfig():
+ """Auto-discovered ``PwgDemo2026`` instance."""
+ from modules.demoConfigs import _getDemoConfigByCode
+ cfg = _getDemoConfigByCode("pwg-demo-2026")
+ assert cfg is not None, (
+ "Demo config 'pwg-demo-2026' not found — check modules/demoConfigs/pwgDemo2026.py"
+ )
+ return cfg
+
+
+@pytest.fixture
+def mandatePwg(db):
+ records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
+ assert records, "Mandate 'stiftung-pwg' not found — run pwgDemoConfig.load() first"
+ return records[0]
+
+
+@pytest.fixture
+def pwgUser(db):
+ records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
+ assert records, "User 'pwg.demo' not found — run pwgDemoConfig.load() first"
+ return records[0]
+
+
+# ---------------------------------------------------------------------------
+# Bootstrap idempotency
+# ---------------------------------------------------------------------------
+
+class TestPwgDemoBootstrap:
+
+ def test_loadIsIdempotent(self, db, pwgDemoConfig):
+ """Loading the PWG demo twice in a row must not raise errors."""
+ s1 = pwgDemoConfig.load(db)
+ assert len(s1.get("errors", [])) == 0, f"First load errors: {s1['errors']}"
+ s2 = pwgDemoConfig.load(db)
+ assert len(s2.get("errors", [])) == 0, f"Second load errors: {s2['errors']}"
+
+ def test_credentialsAreSurfacedFromLoadSummary(self, db, pwgDemoConfig):
+ s = pwgDemoConfig.load(db)
+ creds = s.get("credentials") or []
+ assert any(c.get("username") == "pwg.demo" for c in creds), (
+ "PWG demo must surface 'pwg.demo' credentials so the SysAdmin "
+ "doesn't have to grep source code for the password."
+ )
+
+ def test_mandateStiftungPwgExists(self, db):
+ records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
+ assert len(records) == 1
+ assert records[0].get("label") == "Stiftung PWG"
+ assert records[0].get("enabled") is True
+
+ def test_pwgDemoUserExists(self, db):
+ records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
+ assert len(records) == 1
+ user = records[0]
+ assert user.get("email") == "pwg.demo@poweron.swiss"
+ assert user.get("isSysAdmin") is True
+ assert user.get("language") == "de"
+
+ def test_pwgUserMembership(self, db, pwgUser, mandatePwg):
+ memberships = db.getRecordset(UserMandate, recordFilter={
+ "userId": pwgUser.get("id"),
+ "mandateId": mandatePwg.get("id"),
+ })
+ assert len(memberships) >= 1, "PWG demo user not a member of Stiftung PWG"
+
+ @pytest.mark.parametrize(
+ "featureCode",
+ ["workspace", "trustee", "graphicalEditor", "neutralization"],
+ )
+ def test_pwgFeaturesExist(self, db, mandatePwg, featureCode):
+ instances = _getFeatureInstances(db, mandatePwg.get("id"), featureCode)
+ assert len(instances) >= 1, f"Feature '{featureCode}' missing in Stiftung PWG"
+
+ def test_pwgFourFeatureInstances(self, db, mandatePwg):
+ instances = db.getRecordset(FeatureInstance, recordFilter={
+ "mandateId": mandatePwg.get("id"),
+ }) or []
+ codes = sorted({i.get("featureCode") for i in instances})
+ assert codes == ["graphicalEditor", "neutralization", "trustee", "workspace"], (
+ f"Expected exactly 4 feature instances, got {codes}"
+ )
+
+
+# ---------------------------------------------------------------------------
+# Trustee seed data — 5 fictitious tenants × 12 monthly bookings each
+# ---------------------------------------------------------------------------
+
+class TestPwgTrusteeSeed:
+
+ def test_trusteeRentAccountExists(self, db, mandatePwg):
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataAccount
+ instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
+ assert instances, "No trustee instance for PWG"
+ instId = instances[0].get("id")
+ from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
+ trusteeDb = _openTrusteeDb()
+ accounts = trusteeDb.getRecordset(TrusteeDataAccount, recordFilter={
+ "featureInstanceId": instId,
+ "accountNumber": "6000",
+ }) or []
+ assert len(accounts) == 1, f"Expected exactly 1 rent account 6000, got {len(accounts)}"
+ assert accounts[0].get("isActive") is True
+
+ def test_trusteeFiveTenants(self, db, mandatePwg):
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataContact
+ instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
+ instId = instances[0].get("id")
+ from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
+ trusteeDb = _openTrusteeDb()
+ contacts = trusteeDb.getRecordset(TrusteeDataContact, recordFilter={
+ "featureInstanceId": instId,
+ }) or []
+ # Some installations may already have other trustee contacts, but the
+ # 5 PWG seed tenants must be present.
+ names = {c.get("name") for c in contacts}
+ for expected in (
+ "Anna Müller", "Beat Schneider", "Carla Weber",
+ "Daniel Frey", "Eva Lang",
+ ):
+ assert expected in names, f"PWG seed tenant '{expected}' missing"
+
+ def test_trusteeMonthlyBookingsForTenant(self, db, mandatePwg):
+ """Every tenant gets 12 monthly journal entries."""
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataJournalEntry
+ instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
+ instId = instances[0].get("id")
+ from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
+ trusteeDb = _openTrusteeDb()
+ entries = trusteeDb.getRecordset(TrusteeDataJournalEntry, recordFilter={
+ "featureInstanceId": instId,
+ }) or []
+ # 5 tenants × 12 months = 60; >= so reload doesn't false-fail.
+ pwgEntries = [e for e in entries if (e.get("reference") or "").startswith("PWG-")]
+ assert len(pwgEntries) >= 60, (
+ f"Expected >=60 PWG journal entries (5 tenants × 12 months), got {len(pwgEntries)}"
+ )
+
+
+# ---------------------------------------------------------------------------
+# Pilot workflow — imported envelope, must be active=False
+# ---------------------------------------------------------------------------
+
+class TestPwgPilotWorkflow:
+
+ def test_pilotWorkflowImported(self, db, mandatePwg):
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
+ from modules.demoConfigs.pwgDemo2026 import _openGraphicalEditorDb
+ instances = _getFeatureInstances(db, mandatePwg.get("id"), "graphicalEditor")
+ assert instances, "No graphicalEditor instance for PWG"
+ instId = instances[0].get("id")
+ geDb = _openGraphicalEditorDb()
+ wfs = geDb.getRecordset(AutoWorkflow, recordFilter={
+ "mandateId": mandatePwg.get("id"),
+ "featureInstanceId": instId,
+ "label": "PWG Pilot: Jahresmietzinsbestätigung",
+ }) or []
+ assert len(wfs) == 1, f"Expected exactly 1 PWG pilot workflow, got {len(wfs)}"
+ wf = wfs[0]
+ # AC 10: imports must be inactive by default
+ assert wf.get("active") is False, "PWG pilot workflow must be imported with active=false"
+ graph = wf.get("graph") or {}
+ assert (graph.get("nodes") or []), "PWG pilot workflow has no nodes"
+
+
+# ---------------------------------------------------------------------------
+# Lifecycle: remove + reload (mirrors investor demo TestDemoRemoveAndReload)
+# ---------------------------------------------------------------------------
+
+class TestPwgRemoveAndReload:
+
+ def test_removeAndReload(self, db, pwgDemoConfig):
+ """Remove the PWG demo, verify it is gone, then reload it."""
+ rs = pwgDemoConfig.remove(db)
+ assert len(rs.get("errors", [])) == 0, f"Remove errors: {rs['errors']}"
+
+ mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
+ assert len(mandates) == 0, "Stiftung PWG mandate should be gone after remove"
+
+ users = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
+ assert len(users) == 0, "pwg.demo user should be gone after remove"
+
+ ls = pwgDemoConfig.load(db)
+ assert len(ls.get("errors", [])) == 0, f"Reload errors: {ls['errors']}"
+
+ mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
+ assert len(mandates) == 1, "Stiftung PWG must exist after reload"
diff --git a/tests/integration/rbac/test_rbac_database.py b/tests/integration/rbac/test_rbac_database.py
index 72eb1b26..208ed6dd 100644
--- a/tests/integration/rbac/test_rbac_database.py
+++ b/tests/integration/rbac/test_rbac_database.py
@@ -166,7 +166,7 @@ class TestRbacDatabaseFiltering:
try:
mandate = Mandate(
id=testMandateId,
- name="RBAC test mandate",
+ name="rbac-test-mandate-uc",
label="RBAC test",
)
mandatePayload = mandate.model_dump()
diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py
deleted file mode 100644
index 59a3234d..00000000
--- a/tests/test_phase123_basic.py
+++ /dev/null
@@ -1,314 +0,0 @@
-"""
-Basic verification tests for Phase 1-3 implementation.
-Run with: python tests/test_phase123_basic.py
-Requires: gateway running on localhost:8000
-"""
-import sys
-import os
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-print("=" * 60)
-print("PHASE 1-3 BASIC VERIFICATION")
-print("=" * 60)
-
-errors = []
-passes = []
-
-def _check(label, condition, detail=""):
- if condition:
- passes.append(label)
- print(f" [PASS] {label}")
- else:
- errors.append(f"{label}: {detail}")
- print(f" [FAIL] {label} — {detail}")
-
-# ── Phase 1: Data Models ──────────────────────────────────────────────────────
-print("\n--- Phase 1: Data Models ---")
-
-try:
- from modules.datamodels.datamodelUam import Mandate
- m = Mandate(name="test", label="test")
- _check("Mandate has isSystem field", hasattr(m, "isSystem"))
- _check("Mandate isSystem default False", m.isSystem is False)
- _check("Mandate no mandateType field", not hasattr(m, "mandateType"))
-except Exception as e:
- errors.append(f"Phase 1 DataModel: {e}")
- print(f" [FAIL] Phase 1 DataModel import: {e}")
-
-try:
- from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS, SubscriptionPlan
- _check("PENDING status exists", hasattr(SubscriptionStatusEnum, "PENDING"))
- _check("BUILTIN_PLANS has TRIAL_14D", "TRIAL_14D" in BUILTIN_PLANS)
- trial = BUILTIN_PLANS["TRIAL_14D"]
- _check("TRIAL_14D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB"))
- _check("TRIAL_14D maxDataVolumeMB=1024", trial.maxDataVolumeMB == 1024)
- _check("TRIAL_14D has includedModules", hasattr(trial, "includedModules"))
- _check("TRIAL_14D includedModules=2", trial.includedModules == 2)
- _check("TRIAL_14D trialDays=14", trial.trialDays == 14)
-except Exception as e:
- errors.append(f"Phase 1 Subscription: {e}")
- print(f" [FAIL] Phase 1 Subscription: {e}")
-
-# ── Phase 2: Scope Fields ─────────────────────────────────────────────────────
-print("\n--- Phase 2: Scope Fields on Models ---")
-
-try:
- from modules.datamodels.datamodelFiles import FileItem
- fi = FileItem(fileName="test.txt", mimeType="text/plain", fileHash="abc", fileSize=100)
- _check("FileItem has scope field", hasattr(fi, "scope"))
- _check("FileItem scope default=personal", fi.scope == "personal")
- _check("FileItem has neutralize field", hasattr(fi, "neutralize"))
- _check("FileItem neutralize default=False", fi.neutralize == False)
-except Exception as e:
- errors.append(f"Phase 2 FileItem: {e}")
- print(f" [FAIL] Phase 2 FileItem: {e}")
-
-try:
- from modules.datamodels.datamodelDataSource import DataSource
- ds = DataSource(connectionId="c1", sourceType="sharepoint", path="/test", label="Test")
- _check("DataSource has scope field", hasattr(ds, "scope"))
- _check("DataSource scope default=personal", ds.scope == "personal")
- _check("DataSource has neutralize field", hasattr(ds, "neutralize"))
- _check("DataSource neutralize default=False", ds.neutralize == False)
-except Exception as e:
- errors.append(f"Phase 2 DataSource: {e}")
- print(f" [FAIL] Phase 2 DataSource: {e}")
-
-try:
- from modules.datamodels.datamodelKnowledge import FileContentIndex
- fci = FileContentIndex(userId="u1", fileName="test.txt", mimeType="text/plain")
- _check("FileContentIndex has scope field", hasattr(fci, "scope"))
- _check("FileContentIndex scope default=personal", fci.scope == "personal")
- _check("FileContentIndex has neutralizationStatus", hasattr(fci, "neutralizationStatus"))
- _check("FileContentIndex neutralizationStatus default=None", fci.neutralizationStatus is None)
-except Exception as e:
- errors.append(f"Phase 2 FileContentIndex: {e}")
- print(f" [FAIL] Phase 2 FileContentIndex: {e}")
-
-# ── Phase 2: RAG Scope Filtering ──────────────────────────────────────────────
-print("\n--- Phase 2: RAG Scope Logic ---")
-
-try:
- from modules.interfaces.interfaceDbKnowledge import KnowledgeObjects
- _check("KnowledgeObjects has _getScopedFileIds", hasattr(KnowledgeObjects, "_getScopedFileIds"))
- _check("KnowledgeObjects has _buildScopeFilter", hasattr(KnowledgeObjects, "_buildScopeFilter"))
-
- import inspect
- sig = inspect.signature(KnowledgeObjects._getScopedFileIds)
- params = list(sig.parameters.keys())
- _check("_getScopedFileIds has isSysAdmin param", "isSysAdmin" in params)
-
- sig2 = inspect.signature(KnowledgeObjects.semanticSearch)
- params2 = list(sig2.parameters.keys())
- _check("semanticSearch has scope param", "scope" in params2)
- _check("semanticSearch has isSysAdmin param", "isSysAdmin" in params2)
-except Exception as e:
- errors.append(f"Phase 2 RAG: {e}")
- print(f" [FAIL] Phase 2 RAG: {e}")
-
-# ── Phase 3: Neutralization Methods ───────────────────────────────────────────
-print("\n--- Phase 3: Neutralization Integration ---")
-
-try:
- from modules.workflows.workflowManager import WorkflowManager
- _check("WorkflowManager has _neutralizePromptIfRequired", hasattr(WorkflowManager, "_neutralizePromptIfRequired"))
- _check("WorkflowManager has _rehydrateResponseIfNeeded", hasattr(WorkflowManager, "_rehydrateResponseIfNeeded"))
-
- import inspect
- sig_n = inspect.signature(WorkflowManager._neutralizePromptIfRequired)
- _check("_neutralizePromptIfRequired is async", inspect.iscoroutinefunction(WorkflowManager._neutralizePromptIfRequired))
-
- sig_r = inspect.signature(WorkflowManager._rehydrateResponseIfNeeded)
- _check("_rehydrateResponseIfNeeded is async", inspect.iscoroutinefunction(WorkflowManager._rehydrateResponseIfNeeded))
-except Exception as e:
- errors.append(f"Phase 3 WorkflowManager: {e}")
- print(f" [FAIL] Phase 3 WorkflowManager: {e}")
-
-# ── Phase 3: Fail-Safe Logic ──────────────────────────────────────────────────
-print("\n--- Phase 3: Fail-Safe Logic ---")
-
-try:
- import ast
- with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "workflows", "methods", "methodContext", "actions", "neutralizeData.py"), "r") as f:
- source = f.read()
- _check("neutralizeData.py has 'SKIPPING' fail-safe", "SKIPPING" in source)
- _check("neutralizeData.py has 'do NOT pass original' comment", "do NOT pass original" in source.lower() or "not passing original" in source.lower())
- _check("neutralizeData.py uses continue for skip", "continue" in source)
-except Exception as e:
- errors.append(f"Phase 3 Fail-Safe: {e}")
- print(f" [FAIL] Phase 3 Fail-Safe: {e}")
-
-# ── Phase 2: Route Endpoints ──────────────────────────────────────────────────
-print("\n--- Phase 2: API Endpoints ---")
-
-try:
- import ast
- with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "routes", "routeDataFiles.py"), "r") as f:
- source = f.read()
- _check("routeDataFiles has PATCH scope endpoint", "updateFileScope" in source)
- _check("routeDataFiles has PATCH neutralize endpoint", "updateFileNeutralize" in source)
- _check("routeDataFiles checks global sysAdmin", "isSysAdmin" in source)
-except Exception as e:
- errors.append(f"Phase 2 Routes: {e}")
- print(f" [FAIL] Phase 2 Routes: {e}")
-
-# ── Phase 1: Store Endpoints ──────────────────────────────────────────────────
-print("\n--- Phase 1: Store Endpoints ---")
-
-try:
- with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "routes", "routeStore.py"), "r") as f:
- source = f.read()
- _check("routeStore has listUserMandates", "listUserMandates" in source or "list_user_mandates" in source)
- _check("routeStore has getSubscriptionInfo", "getSubscriptionInfo" in source or "get_subscription_info" in source)
- _check("routeStore has orphan control", "orphan" in source.lower() or "last" in source.lower())
-except Exception as e:
- errors.append(f"Phase 1 Store: {e}")
- print(f" [FAIL] Phase 1 Store: {e}")
-
-# ── Phase 1: Provisioning ─────────────────────────────────────────────────────
-print("\n--- Phase 1: Provisioning ---")
-
-try:
- with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "interfaces", "interfaceDbApp.py"), "r") as f:
- source = f.read()
- _check("interfaceDbApp has _provisionMandateForUser", "_provisionMandateForUser" in source)
- _check("interfaceDbApp has _activatePendingSubscriptions", "_activatePendingSubscriptions" in source)
- _check("interfaceDbApp has deleteMandate cascade", "deleteMandate" in source and "cascade" in source.lower())
-except Exception as e:
- errors.append(f"Phase 1 Provisioning: {e}")
- print(f" [FAIL] Phase 1 Provisioning: {e}")
-
-# ── Phase 1: Registration Routes ──────────────────────────────────────────────
-print("\n--- Phase 1: Registration ---")
-
-try:
- with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "routes", "routeSecurityLocal.py"), "r") as f:
- source = f.read()
- _check("routeSecurityLocal has registrationType", "registrationType" in source)
- _check("routeSecurityLocal has companyName", "companyName" in source)
- _check("routeSecurityLocal has onboarding endpoint", "onboarding" in source)
-except Exception as e:
- errors.append(f"Phase 1 Registration: {e}")
- print(f" [FAIL] Phase 1 Registration: {e}")
-
-# ── Fix 1: OnboardingWizard Integration ────────────────────────────────────────
-print("\n--- Fix 1: OnboardingWizard Integration ---")
-
-try:
- loginPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "..", "frontend_nyla", "src", "pages", "Login.tsx")
- with open(loginPath, "r", encoding="utf-8") as f:
- source = f.read()
- _check("Login.tsx imports OnboardingWizard", "OnboardingWizard" in source)
- _check("Login.tsx has showOnboardingWizard state", "showOnboardingWizard" in source)
- _check("Login.tsx checks isNewUser", "isNewUser" in source)
-except Exception as e:
- errors.append(f"Fix 1: {e}")
- print(f" [FAIL] Fix 1: {e}")
-
-# ── Fix 2: CommCoach UDB Integration ──────────────────────────────────────────
-print("\n--- Fix 2: CommCoach UDB Integration ---")
-
-try:
- dossierPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "..", "frontend_nyla", "src", "pages", "views", "commcoach", "CommcoachDossierView.tsx")
- with open(dossierPath, "r", encoding="utf-8") as f:
- source = f.read()
- _check("CommCoach imports UnifiedDataBar", "UnifiedDataBar" in source)
- _check("CommCoach imports FilesTab", "FilesTab" in source)
- _check("CommCoach no longer imports getDocumentsApi", "getDocumentsApi" not in source)
- _check("CommCoach has UDB sidebar", "udbSidebar" in source or "UnifiedDataBar" in source)
-except Exception as e:
- errors.append(f"Fix 2: {e}")
- print(f" [FAIL] Fix 2: {e}")
-
-# ── Fix 3: Neutralization Backend Endpoints ───────────────────────────────────
-print("\n--- Fix 3: Neutralization Backend Endpoints ---")
-
-try:
- routePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "features", "neutralization", "routeFeatureNeutralizer.py")
- with open(routePath, "r") as f:
- source = f.read()
- _check("Neutralization has deleteAttribute endpoint", "deleteAttribute" in source or "delete_attribute" in source)
- _check("Neutralization has retrigger endpoint", "retrigger" in source)
- _check("Neutralization has single attribute delete", "single" in source or "attributeId" in source)
-except Exception as e:
- errors.append(f"Fix 3: {e}")
- print(f" [FAIL] Fix 3: {e}")
-
-# ── Fix 4: Central AI Neutralization ──────────────────────────────────────────
-print("\n--- Fix 4: Central AI Neutralization ---")
-
-try:
- aiPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "serviceCenter", "services", "serviceAi", "mainServiceAi.py")
- with open(aiPath, "r") as f:
- source = f.read()
- _check("AiService has _shouldNeutralize", "_shouldNeutralize" in source)
- _check("AiService has _neutralizeRequest", "_neutralizeRequest" in source)
- _check("AiService has _rehydrateResponse", "_rehydrateResponse" in source)
- _check("callAi uses neutralization", "_shouldNeutralize" in source and "_neutralizeRequest" in source)
-except Exception as e:
- errors.append(f"Fix 4: {e}")
- print(f" [FAIL] Fix 4: {e}")
-
-# ── Fix 5: Voice Settings User Level ──────────────────────────────────────────
-print("\n--- Fix 5: Voice Settings User Level ---")
-
-try:
- from modules.datamodels.datamodelUam import UserVoicePreferences
- uvp = UserVoicePreferences(userId="u1")
- _check("UserVoicePreferences model exists", True)
- _check("UserVoicePreferences has sttLanguage", hasattr(uvp, "sttLanguage"))
- _check("UserVoicePreferences default sttLanguage=de-DE", uvp.sttLanguage == "de-DE")
- _check("UserVoicePreferences has ttsVoice", hasattr(uvp, "ttsVoice"))
-except Exception as e:
- errors.append(f"Fix 5: {e}")
- print(f" [FAIL] Fix 5: {e}")
-
-try:
- voiceUserPath = os.path.join(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "routes", "routeVoiceUser.py",
- )
- with open(voiceUserPath, "r") as f:
- source = f.read()
- _check("Voice preferences GET endpoint", '"/preferences"' in source and "getVoicePreferences" in source)
- _check("Voice preferences PUT endpoint", "updateVoicePreferences" in source)
-except Exception as e:
- errors.append(f"Fix 5 Routes: {e}")
- print(f" [FAIL] Fix 5 Routes: {e}")
-
-# ── Fix 6: RAG mandate-wide scope ─────────────────────────────────────────────
-print("\n--- Fix 6: RAG mandate-wide scope ---")
-
-try:
- knowledgePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
- "modules", "serviceCenter", "services", "serviceKnowledge", "mainServiceKnowledge.py")
- with open(knowledgePath, "r") as f:
- source = f.read()
- _check("buildAgentContext passes mandateId to semanticSearch", "mandateId=mandateId" in source)
- _check("buildAgentContext has isSysAdmin param", "isSysAdmin" in source)
-except Exception as e:
- errors.append(f"Fix 6: {e}")
- print(f" [FAIL] Fix 6: {e}")
-
-# ── Summary ───────────────────────────────────────────────────────────────────
-print("\n" + "=" * 60)
-print(f"RESULTS: {len(passes)} passed, {len(errors)} failed")
-print("=" * 60)
-
-if errors:
- print("\nFAILURES:")
- for e in errors:
- print(f" - {e}")
- sys.exit(1)
-else:
- print("\nALL CHECKS PASSED!")
- sys.exit(0)
diff --git a/tests/test_service_redmine_stats.py b/tests/test_service_redmine_stats.py
index 310c15c7..aecd2caf 100644
--- a/tests/test_service_redmine_stats.py
+++ b/tests/test_service_redmine_stats.py
@@ -112,6 +112,8 @@ class TestAggregateEndToEnd:
dateTo="2026-04-30",
bucket="month",
trackerIdsFilter=[],
+ categoryIdsFilter=[],
+ statusFilter="",
instanceId="test-instance",
)
assert dto.instanceId == "test-instance"
diff --git a/tests/unit/serviceAgent/test_workflow_tools_crud.py b/tests/unit/serviceAgent/test_workflow_tools_crud.py
new file mode 100644
index 00000000..9ebe1df6
--- /dev/null
+++ b/tests/unit/serviceAgent/test_workflow_tools_crud.py
@@ -0,0 +1,383 @@
+# Copyright (c) 2026 Patrick Motsch
+# All rights reserved.
+"""T3 — Unit tests for the workflow-CRUD agent tools.
+
+Covers AC 5 + AC 6 of the PWG-Pilot plan:
+ - createWorkflow happy-path returns a workflowId.
+ - createWorkflow rejects missing label / instanceId.
+ - deleteWorkflow without ``confirm=true`` is a NO-OP and returns an error.
+ - deleteWorkflow with ``confirm=true`` deletes and returns success.
+ - updateWorkflowMetadata patches only the supplied fields.
+ - createWorkflowFromFile / exportWorkflowToFile happy-path round-trip.
+
+The tools call into a feature-instance interface; we replace
+``workflowTools._getInterface`` with a fake that captures interactions
+without touching any database.
+"""
+
+import asyncio
+import json
+import uuid
+from typing import Any, Dict, Optional
+
+import pytest
+
+from modules.serviceCenter.services.serviceAgent import workflowTools
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+class _FakeInterface:
+ """In-memory stand-in for ``GraphicalEditorObjects``.
+
+ Stores workflows by id and records every method call in ``self.calls``
+ so tests can assert on the parameters the tool layer forwarded.
+ """
+
+ def __init__(self, mandateId: str = "mand-1", featureInstanceId: str = "inst-1"):
+ self.mandateId = mandateId
+ self.featureInstanceId = featureInstanceId
+ self.workflows: Dict[str, Dict[str, Any]] = {}
+ self.calls: list = []
+
+ def createWorkflow(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ self.calls.append(("createWorkflow", data))
+ wfId = data.get("id") or str(uuid.uuid4())
+ record = dict(data)
+ record["id"] = wfId
+ record["mandateId"] = self.mandateId
+ record["featureInstanceId"] = self.featureInstanceId
+ record.setdefault("active", False)
+ self.workflows[wfId] = record
+ return record
+
+ def updateWorkflow(self, workflowId: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+ self.calls.append(("updateWorkflow", workflowId, data))
+ existing = self.workflows.get(workflowId)
+ if not existing:
+ return None
+ existing.update(data)
+ return existing
+
+ def deleteWorkflow(self, workflowId: str) -> bool:
+ self.calls.append(("deleteWorkflow", workflowId))
+ return self.workflows.pop(workflowId, None) is not None
+
+ def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]:
+ return self.workflows.get(workflowId)
+
+ def importWorkflowFromDict(
+ self,
+ envelope: Dict[str, Any],
+ existingWorkflowId: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ self.calls.append(("importWorkflowFromDict", envelope, existingWorkflowId))
+ data = {
+ "label": envelope.get("label", "Imported"),
+ "description": envelope.get("description", ""),
+ "tags": envelope.get("tags", []),
+ "graph": envelope.get("graph", {"nodes": [], "connections": []}),
+ "invocations": envelope.get("invocations", []),
+ "active": False,
+ }
+ if existingWorkflowId:
+ updated = self.updateWorkflow(existingWorkflowId, data) or {}
+ return {"workflow": updated, "warnings": [], "created": False}
+ created = self.createWorkflow(data)
+ return {"workflow": created, "warnings": [], "created": True}
+
+ def exportWorkflowToDict(self, workflowId: str) -> Optional[Dict[str, Any]]:
+ wf = self.workflows.get(workflowId)
+ if not wf:
+ return None
+ return {
+ "$schemaVersion": "1.0",
+ "$kind": "poweron.workflow",
+ "label": wf.get("label"),
+ "description": wf.get("description", ""),
+ "tags": wf.get("tags", []),
+ "graph": wf.get("graph") or {"nodes": [], "connections": []},
+ "invocations": wf.get("invocations") or [],
+ }
+
+
+@pytest.fixture
+def fakeInterface(monkeypatch):
+    """Replace ``_getInterface`` with a per-test in-memory fake."""
+ fake = _FakeInterface()
+ monkeypatch.setattr(workflowTools, "_getInterface", lambda _ctx, _iid: fake)
+ return fake
+
+
+def _ctx(workflowId: str = "wf-1", instanceId: str = "inst-1") -> Dict[str, Any]:
+ """Standard agent-tool context dict."""
+ return {
+ "workflowId": workflowId,
+ "featureInstanceId": instanceId,
+ "userId": "user-1",
+ "mandateId": "mand-1",
+ }
+
+
+def _runTool(handler, params: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
+ return asyncio.run(handler(params, context))
+
+
+def _payload(result: ToolResult) -> Dict[str, Any]:
+ """Decode the tool's data string back into a dict for easy asserts."""
+ assert isinstance(result.data, str), "ToolResult.data must be a string per registry contract"
+ return json.loads(result.data)
+
+
+# ---------------------------------------------------------------------------
+# createWorkflow — AC 5
+# ---------------------------------------------------------------------------
+
+class TestCreateWorkflow:
+ def test_happyPathReturnsWorkflowId(self, fakeInterface):
+ result = _runTool(workflowTools._createWorkflow, {"label": "Smoke-Test"}, _ctx())
+ assert result.success, result.error
+ payload = _payload(result)
+ assert payload["workflowId"]
+ assert payload["label"] == "Smoke-Test"
+ assert payload["workflowId"] in fakeInterface.workflows
+ assert fakeInterface.workflows[payload["workflowId"]]["active"] is False
+
+ def test_missingLabelIsRejected(self, fakeInterface):
+ result = _runTool(workflowTools._createWorkflow, {}, _ctx())
+ assert not result.success
+ assert "label" in (result.error or "").lower()
+        assert fakeInterface.calls == [], "no DB call may be made on a validation error"
+
+ def test_missingInstanceIdIsRejected(self, fakeInterface):
+ ctx = {"workflowId": "wf-1", "userId": "user-1", "mandateId": "mand-1"}
+ result = _runTool(workflowTools._createWorkflow, {"label": "Empty"}, ctx)
+ assert not result.success
+ assert "instanceid" in (result.error or "").lower()
+
+ def test_blankLabelIsRejected(self, fakeInterface):
+ result = _runTool(workflowTools._createWorkflow, {"label": " "}, _ctx())
+ assert not result.success
+
+ def test_initialGraphAndTagsAreForwarded(self, fakeInterface):
+ graph = {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []}
+ result = _runTool(
+ workflowTools._createWorkflow,
+ {"label": "With Graph", "tags": ["pwg"], "graph": graph, "description": "d"},
+ _ctx(),
+ )
+ assert result.success
+ wfId = _payload(result)["workflowId"]
+ stored = fakeInterface.workflows[wfId]
+ assert stored["tags"] == ["pwg"]
+ assert stored["description"] == "d"
+ assert stored["graph"]["nodes"][0]["id"] == "n1"
+
+
+# ---------------------------------------------------------------------------
+# deleteWorkflow — AC 6
+# ---------------------------------------------------------------------------
+
+class TestDeleteWorkflow:
+ def test_withoutConfirmReturnsError(self, fakeInterface):
+ fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
+ result = _runTool(workflowTools._deleteWorkflow, {"workflowId": "wf-x"}, _ctx())
+ assert not result.success
+ assert "confirm" in (result.error or "").lower()
+        # Critical: no destructive call may reach the interface
+ assert all(call[0] != "deleteWorkflow" for call in fakeInterface.calls)
+ assert "wf-x" in fakeInterface.workflows
+
+ def test_withConfirmFalseAlsoBlocks(self, fakeInterface):
+ fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
+ result = _runTool(
+ workflowTools._deleteWorkflow,
+ {"workflowId": "wf-x", "confirm": False},
+ _ctx(),
+ )
+ assert not result.success
+ assert "wf-x" in fakeInterface.workflows
+
+ def test_withConfirmTrueDeletes(self, fakeInterface):
+ fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
+ result = _runTool(
+ workflowTools._deleteWorkflow,
+ {"workflowId": "wf-x", "confirm": True},
+ _ctx(),
+ )
+ assert result.success, result.error
+ assert "wf-x" not in fakeInterface.workflows
+
+ def test_unknownWorkflowReturnsError(self, fakeInterface):
+ result = _runTool(
+ workflowTools._deleteWorkflow,
+ {"workflowId": "wf-ghost", "confirm": True},
+ _ctx(),
+ )
+ assert not result.success
+ assert "not found" in (result.error or "").lower()
+
+ def test_missingIdsReturnError(self, fakeInterface):
+ result = _runTool(
+ workflowTools._deleteWorkflow,
+ {"confirm": True},
+ {"userId": "user-1", "mandateId": "mand-1"},
+ )
+ assert not result.success
+ assert "required" in (result.error or "").lower()
+
+
+# ---------------------------------------------------------------------------
+# updateWorkflowMetadata — supports the "rename" intent without touching graph
+# ---------------------------------------------------------------------------
+
+class TestUpdateWorkflowMetadata:
+ def test_renameOnlyTouchesLabel(self, fakeInterface):
+ fakeInterface.workflows["wf-1"] = {
+ "id": "wf-1",
+ "label": "Old Name",
+ "graph": {"nodes": [{"id": "n1"}], "connections": []},
+ }
+ result = _runTool(
+ workflowTools._updateWorkflowMetadata,
+ {"workflowId": "wf-1", "label": "New Name"},
+ _ctx(),
+ )
+ assert result.success, result.error
+ payload = _payload(result)
+ assert payload["label"] == "New Name"
+ assert payload["changed"] == ["label"]
+ # Graph must remain untouched
+ stored = fakeInterface.workflows["wf-1"]
+ assert stored["graph"]["nodes"][0]["id"] == "n1"
+
+ def test_emptyPatchIsRejected(self, fakeInterface):
+ fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"}
+ result = _runTool(
+ workflowTools._updateWorkflowMetadata,
+ {"workflowId": "wf-1"},
+ _ctx(),
+ )
+ assert not result.success
+
+ def test_blankLabelIsRejected(self, fakeInterface):
+ fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"}
+ result = _runTool(
+ workflowTools._updateWorkflowMetadata,
+ {"workflowId": "wf-1", "label": " "},
+ _ctx(),
+ )
+ assert not result.success
+
+
+# ---------------------------------------------------------------------------
+# createWorkflowFromFile / exportWorkflowToFile — round-trip via the tool layer
+# ---------------------------------------------------------------------------
+
+class TestImportExportTools:
+ def test_inlineEnvelopeImportCreatesWorkflow(self, fakeInterface):
+ envelope = {
+ "$schemaVersion": "1.0",
+ "label": "Imported PWG",
+ "graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []},
+ }
+ result = _runTool(
+ workflowTools._createWorkflowFromFile,
+ {"envelope": envelope},
+ _ctx(),
+ )
+ assert result.success, result.error
+ payload = _payload(result)
+ assert payload["workflowId"]
+ assert payload["created"] is True
+ assert payload["label"] == "Imported PWG"
+ assert fakeInterface.workflows[payload["workflowId"]]["active"] is False
+
+ def test_importRequiresFileIdOrEnvelope(self, fakeInterface):
+ result = _runTool(
+ workflowTools._createWorkflowFromFile,
+ {},
+ _ctx(),
+ )
+ assert not result.success
+ assert "fileid" in (result.error or "").lower() or "envelope" in (result.error or "").lower()
+
+ def test_existingWorkflowIdReplacesGraph(self, fakeInterface):
+ fakeInterface.workflows["wf-1"] = {
+ "id": "wf-1",
+ "label": "Existing",
+ "graph": {"nodes": [], "connections": []},
+ }
+ envelope = {
+ "$schemaVersion": "1.0",
+ "label": "Replaced",
+ "graph": {"nodes": [{"id": "n2", "type": "trigger.manual"}], "connections": []},
+ }
+ result = _runTool(
+ workflowTools._createWorkflowFromFile,
+ {"envelope": envelope, "existingWorkflowId": "wf-1"},
+ _ctx(),
+ )
+ assert result.success, result.error
+ payload = _payload(result)
+ assert payload["created"] is False
+ assert fakeInterface.workflows["wf-1"]["graph"]["nodes"][0]["id"] == "n2"
+
+ def test_exportProducesEnvelopeWithSchemaVersion(self, fakeInterface):
+ fakeInterface.workflows["wf-1"] = {
+ "id": "wf-1",
+ "label": "Round-Trip",
+ "graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []},
+ }
+ result = _runTool(
+ workflowTools._exportWorkflowToFile,
+ {"workflowId": "wf-1"},
+ _ctx(),
+ )
+ assert result.success, result.error
+ payload = _payload(result)
+ assert payload["fileName"].endswith(".workflow.json")
+ assert payload["schemaVersion"] == "1.0"
+ envelope = payload["envelope"]
+ assert envelope["label"] == "Round-Trip"
+ assert envelope["$kind"] == "poweron.workflow"
+
+ def test_exportUnknownWorkflowReturnsError(self, fakeInterface):
+ result = _runTool(
+ workflowTools._exportWorkflowToFile,
+ {"workflowId": "wf-ghost"},
+ _ctx(),
+ )
+ assert not result.success
+ assert "not found" in (result.error or "").lower()
+
+
+# ---------------------------------------------------------------------------
+# Tool definitions — make sure the new tools are registered with the toolbox
+# (cheap regression test that a refactor doesn't drop one of them silently)
+# ---------------------------------------------------------------------------
+
+class TestToolDefinitions:
+ def test_allCrudToolsAreRegistered(self):
+ defs = workflowTools.getWorkflowToolDefinitions()
+ names = {d["name"] for d in defs}
+ for required in (
+ "createWorkflow",
+ "createWorkflowFromFile",
+ "exportWorkflowToFile",
+ "deleteWorkflow",
+ "updateWorkflowMetadata",
+ ):
+ assert required in names, f"{required} missing from workflow toolbox"
+
+ def test_deleteWorkflowMarksConfirmRequired(self):
+ defs = {d["name"]: d for d in workflowTools.getWorkflowToolDefinitions()}
+ deleteSpec = defs["deleteWorkflow"]
+ params = deleteSpec.get("parameters", {})
+ assert "confirm" in (params.get("required") or []), (
+ "deleteWorkflow must declare confirm as required so the model "
+ "cannot accidentally call it without an explicit confirmation."
+ )
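+
+
+# ---------------------------------------------------------------------------
+# Reference sketch of the confirm-gate contract locked by TestDeleteWorkflow.
+# This is NOT the production handler in workflowTools: the ToolResult kwargs
+# and error wording are assumptions; only the ordering (validate first, touch
+# the interface last) is taken from the tests above.
+# ---------------------------------------------------------------------------
+
+async def _sketchDeleteWorkflow(params: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
+    workflowId = params.get("workflowId")
+    instanceId = context.get("featureInstanceId")
+    if not workflowId or not instanceId:
+        # Validation fails before any interface call is made.
+        return ToolResult(success=False, error="workflowId and featureInstanceId are required")
+    if params.get("confirm") is not True:
+        return ToolResult(success=False, error="confirm=true is required to delete a workflow")
+    interface = workflowTools._getInterface(context, instanceId)
+    if not interface.deleteWorkflow(workflowId):
+        return ToolResult(success=False, error=f"workflow '{workflowId}' not found")
+    return ToolResult(success=True, data=json.dumps({"workflowId": workflowId, "deleted": True}))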
diff --git a/tests/unit/services/test_json_extraction_merging.py b/tests/unit/services/test_json_extraction_merging.py
index 11f18bba..49f430a8 100644
--- a/tests/unit/services/test_json_extraction_merging.py
+++ b/tests/unit/services/test_json_extraction_merging.py
@@ -3,6 +3,14 @@
# All rights reserved.
"""
Test script for JSON extraction response detection and merging.
+
+The methods under test (``_isJsonExtractionResponse``,
+``_mergeJsonExtractionResponses``, etc.) are pure data manipulation and
+do NOT touch ``self._context`` / ``self._get_service`` / the DB. We
+therefore bypass ``ExtractionService.__init__`` (which would require a
+live ``ServiceCenterContext`` plus a service resolver) by instantiating
+via ``ExtractionService.__new__``, i.e. we construct a stub without any
+dependency wiring.
+
Run: python gateway/tests/unit/services/test_json_extraction_merging.py
"""
@@ -20,7 +28,7 @@ from modules.serviceCenter.services.serviceExtraction.mainServiceExtraction impo
def test_detects_json_with_code_fences():
"""Test that JSON extraction responses with markdown code fences are detected"""
print("Test 1: Detecting JSON with code fences...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test1",
@@ -38,7 +46,7 @@ def test_detects_json_with_code_fences():
def test_detects_json_without_code_fences():
"""Test that JSON extraction responses without code fences are detected"""
print("Test 2: Detecting JSON without code fences...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test2",
@@ -56,7 +64,7 @@ def test_detects_json_without_code_fences():
def test_rejects_non_extraction_json():
"""Test that regular JSON (without extracted_content) is rejected"""
print("Test 3: Rejecting non-extraction JSON...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test3",
@@ -74,7 +82,7 @@ def test_rejects_non_extraction_json():
def test_rejects_non_json_content():
"""Test that non-JSON content is rejected"""
print("Test 4: Rejecting non-JSON content...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test4",
@@ -92,7 +100,7 @@ def test_rejects_non_json_content():
def test_merges_tables_with_same_headers():
"""Test that tables with identical headers are merged"""
print("Test 5: Merging tables with same headers...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@@ -116,18 +124,22 @@ def test_merges_tables_with_same_headers():
assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}"
table = merged["extracted_content"]["tables"][0]
assert table["headers"] == ["Name", "Amount"], f"Headers should match, got {table['headers']}"
- # Should have 3 unique rows (Alice appears twice but should be deduplicated)
- assert len(table["rows"]) == 3, f"Should have 3 unique rows, got {len(table['rows'])}"
+ # Per the documented merge contract ("Tables: Combines all table rows,
+ # ... duplicates preserved" — see _mergeJsonExtractionResponses
+ # docstring), identical rows from different parts are NOT deduplicated.
+ # Alice appears in both parts, so the merged table has 4 rows.
+ assert len(table["rows"]) == 4, f"Should have 4 rows (duplicates preserved), got {len(table['rows'])}"
assert ["Alice", "100"] in table["rows"], "Alice row should be present"
assert ["Bob", "200"] in table["rows"], "Bob row should be present"
assert ["Charlie", "300"] in table["rows"], "Charlie row should be present"
+ assert table["rows"].count(["Alice", "100"]) == 2, "Alice row must be preserved twice (no dedup)"
print(" [PASS]")
def test_merges_multiple_json_blocks_separated_by_dash():
"""Test that multiple JSON blocks separated by --- are merged"""
print("Test 6: Merging multiple JSON blocks separated by ---...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
# Create content part with multiple JSON blocks separated by ---
part1 = ContentPart(
@@ -153,7 +165,7 @@ def test_merges_multiple_json_blocks_separated_by_dash():
def test_merges_text_content():
"""Test that text content from multiple parts is merged"""
print("Test 7: Merging text content...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@@ -183,7 +195,7 @@ def test_merges_text_content():
def test_merges_headings_and_lists():
"""Test that headings and lists are merged"""
print("Test 8: Merging headings and lists...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@@ -218,7 +230,7 @@ def test_merges_headings_and_lists():
def test_handles_empty_content_parts():
"""Test that empty content parts are handled gracefully"""
print("Test 9: Handling empty content parts...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@@ -246,7 +258,7 @@ def test_handles_empty_content_parts():
def test_merges_tables_with_different_headers():
"""Test that tables with different headers are kept separate"""
print("Test 10: Keeping tables with different headers separate...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@@ -284,7 +296,7 @@ def test_merges_tables_with_different_headers():
def test_real_world_scenario():
"""Test with a realistic scenario similar to the debug file"""
print("Test 11: Real-world scenario (multiple documents, multiple JSON blocks)...")
- service = ExtractionService(None)
+ service = ExtractionService.__new__(ExtractionService)
# Simulate 3 documents, each with a table extraction response
part1 = ContentPart(
@@ -314,25 +326,23 @@ def test_real_world_scenario():
merged = service._mergeJsonExtractionResponses([part1, part2, part3])
- # Should have one merged table with all unique transactions
+ # Should have one merged table with all transactions
assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}"
table = merged["extracted_content"]["tables"][0]
assert table["headers"] == ["Transaction ID", "Date", "Amount"], "Headers should match"
-
- # Should have 5 unique rows (TXN001 appears twice but should be deduplicated)
- assert len(table["rows"]) == 5, f"Should have 5 unique rows, got {len(table['rows'])}"
-
- # Verify all transactions are present
+
+ # Per the documented merge contract, duplicate rows are preserved.
+ # TXN001 occurs in both doc1 and doc2 -> 6 rows total.
+ assert len(table["rows"]) == 6, f"Should have 6 rows (duplicates preserved), got {len(table['rows'])}"
+
transaction_ids = [row[0] for row in table["rows"]]
- assert "TXN001" in transaction_ids, "TXN001 should be present"
- assert "TXN002" in transaction_ids, "TXN002 should be present"
- assert "TXN003" in transaction_ids, "TXN003 should be present"
- assert "TXN004" in transaction_ids, "TXN004 should be present"
- assert "TXN005" in transaction_ids, "TXN005 should be present"
-
- # Verify TXN001 appears only once (deduplicated)
- assert transaction_ids.count("TXN001") == 1, "TXN001 should appear only once (deduplicated)"
-
+ for txn in ("TXN001", "TXN002", "TXN003", "TXN004", "TXN005"):
+ assert txn in transaction_ids, f"{txn} should be present"
+
+ # TXN001 must appear twice (no dedup at merge time — dedup is the
+ # responsibility of downstream consumers if needed).
+ assert transaction_ids.count("TXN001") == 2, "TXN001 must appear twice (duplicates preserved)"
+
print(" [PASS]")
diff --git a/tests/unit/workflows/test_automation2_graphUtils.py b/tests/unit/workflows/test_automation2_graphUtils.py
index 45f4ba0f..78077987 100644
--- a/tests/unit/workflows/test_automation2_graphUtils.py
+++ b/tests/unit/workflows/test_automation2_graphUtils.py
@@ -34,9 +34,14 @@ class TestResolveParameterReferences:
assert resolveParameterReferences(value, node_outputs) == "b"
def test_ref_missing_node(self):
+ # Current runtime semantics: an unresolved ref (nodeId not in
+ # node_outputs) collapses to None rather than the original
+ # placeholder dict. The workflow engine relies on this — downstream
+ # nodes treat missing refs as "no value yet" rather than "literal
+ # placeholder" — so we lock the contract here.
node_outputs = {}
value = {"type": "ref", "nodeId": "missing", "path": ["x"]}
- assert resolveParameterReferences(value, node_outputs) == value
+ assert resolveParameterReferences(value, node_outputs) is None
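+        # Resolution semantics locked here, in sketch form (not the impl):
+        #   {"type": "ref", "nodeId": N, "path": P} -> walk node_outputs[N]
+        #       along P; None when N has produced no output yet
+        #   {"type": "value", "value": V}           -> V (see next test)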
def test_value_wrapper(self):
value = {"type": "value", "value": "static text"}
From 794ba36f27a8c76ddd3265bb47f9c22b21d723e2 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Sat, 25 Apr 2026 01:13:01 +0200
Subject: [PATCH 2/7] teamsbot
---
...g-mietzinsbestaetigung-pilot.workflow.json | 3 +-
env_dev.env | 2 +-
.../datamodels/datamodelWorkflowActions.py | 27 +-
.../graphicalEditor/adapterValidator.py | 205 ++
.../features/graphicalEditor/nodeAdapter.py | 172 ++
.../graphicalEditor/nodeDefinitions/ai.py | 35 +-
.../nodeDefinitions/clickup.py | 8 +-
.../nodeDefinitions/context.py | 15 +-
.../graphicalEditor/nodeDefinitions/data.py | 18 +-
.../graphicalEditor/nodeDefinitions/email.py | 50 +-
.../graphicalEditor/nodeDefinitions/flow.py | 4 +-
.../graphicalEditor/nodeDefinitions/input.py | 2 +-
.../nodeDefinitions/sharepoint.py | 6 +-
.../nodeDefinitions/triggers.py | 2 +-
.../nodeDefinitions/trustee.py | 27 +-
.../features/graphicalEditor/nodeRegistry.py | 57 +-
modules/features/graphicalEditor/portTypes.py | 724 +++--
.../routeFeatureGraphicalEditor.py | 46 +
.../graphicalEditor/upstreamPathsService.py | 128 +
.../features/teamsbot/datamodelTeamsbot.py | 61 +-
.../teamsbot/interfaceFeatureTeamsbot.py | 64 +-
.../features/teamsbot/routeFeatureTeamsbot.py | 147 +-
modules/features/teamsbot/service.py | 2576 +++++++++++++++--
.../trustee/accounting/accountingDataSync.py | 31 +-
.../connectors/accountingConnectorRma.py | 9 +-
.../trustee/datamodelFeatureTrustee.py | 4 +-
.../features/trustee/routeFeatureTrustee.py | 76 +-
modules/interfaces/interfaceBootstrap.py | 26 +-
.../serviceAgent/actionToolAdapter.py | 129 +-
.../services/serviceAgent/workflowTools.py | 118 +
.../services/serviceAi/mainServiceAi.py | 27 +-
.../workflows/automation2/executionEngine.py | 15 +-
.../executors/actionNodeExecutor.py | 130 +-
.../automation2/executors/dataExecutor.py | 41 +-
.../featureInstanceRefMigration.py | 159 +
modules/workflows/automation2/graphUtils.py | 137 +-
.../automation2/pickNotPushMigration.py | 83 +
.../automation2/udmUpstreamShapes.py | 36 +
.../methods/_actionSignatureValidator.py | 177 ++
.../workflows/methods/methodAi/methodAi.py | 27 +-
modules/workflows/methods/methodBase.py | 21 +-
.../methods/methodChatbot/methodChatbot.py | 4 +-
.../methods/methodClickup/methodClickup.py | 21 +-
.../methods/methodContext/methodContext.py | 10 +-
.../methods/methodFile/methodFile.py | 3 +-
.../methods/methodJira/methodJira.py | 27 +-
.../methods/methodOutlook/methodOutlook.py | 19 +-
.../methods/methodRedmine/methodRedmine.py | 44 +-
.../methodSharepoint/methodSharepoint.py | 35 +-
.../methodTrustee/actions/processDocuments.py | 13 +-
.../methodTrustee/actions/syncToAccounting.py | 6 +-
.../methods/methodTrustee/methodTrustee.py | 47 +-
scripts/_listMandates.py | 25 +
scripts/check_orphan_featureinstance.py | 97 +
.../script_migrate_feature_instance_refs.py | 213 ++
tests/integration/automation2/__init__.py | 2 +
.../test_pick_not_push_migration_v2.py | 189 ++
tests/integration/trustee/__init__.py | 4 +
.../trustee/test_spesenbelege_workflow_e2e.py | 474 +++
.../test_action_node_connection_provenance.py | 9 +
.../graphicalEditor/test_adapter_validator.py | 352 +++
.../unit/graphicalEditor/test_node_adapter.py | 170 ++
.../graphicalEditor/test_portTypes_catalog.py | 257 ++
.../test_port_schema_recursive.py | 24 +
.../test_upstream_paths_and_graph_schema.py | 67 +
tests/unit/methods/__init__.py | 0
.../test_action_signature_validator.py | 289 ++
.../test_trustee_schema_compliance.py | 188 ++
tests/unit/scripts/__init__.py | 2 +
.../test_migrate_feature_instance_refs.py | 289 ++
.../test_action_tool_adapter_typed.py | 127 +
tests/unit/teamsbot/__init__.py | 0
tests/unit/teamsbot/test_directorPrompts.py | 604 ++++
.../unit/workflow/test_phase3_context_node.py | 33 +-
.../workflows/test_automation2_graphUtils.py | 99 +
.../test_featureInstanceRefMigration.py | 310 ++
76 files changed, 8899 insertions(+), 779 deletions(-)
create mode 100644 modules/features/graphicalEditor/adapterValidator.py
create mode 100644 modules/features/graphicalEditor/nodeAdapter.py
create mode 100644 modules/features/graphicalEditor/upstreamPathsService.py
create mode 100644 modules/workflows/automation2/featureInstanceRefMigration.py
create mode 100644 modules/workflows/automation2/pickNotPushMigration.py
create mode 100644 modules/workflows/automation2/udmUpstreamShapes.py
create mode 100644 modules/workflows/methods/_actionSignatureValidator.py
create mode 100644 scripts/_listMandates.py
create mode 100644 scripts/check_orphan_featureinstance.py
create mode 100644 scripts/script_migrate_feature_instance_refs.py
create mode 100644 tests/integration/automation2/__init__.py
create mode 100644 tests/integration/automation2/test_pick_not_push_migration_v2.py
create mode 100644 tests/integration/trustee/__init__.py
create mode 100644 tests/integration/trustee/test_spesenbelege_workflow_e2e.py
create mode 100644 tests/unit/graphicalEditor/test_action_node_connection_provenance.py
create mode 100644 tests/unit/graphicalEditor/test_adapter_validator.py
create mode 100644 tests/unit/graphicalEditor/test_node_adapter.py
create mode 100644 tests/unit/graphicalEditor/test_portTypes_catalog.py
create mode 100644 tests/unit/graphicalEditor/test_port_schema_recursive.py
create mode 100644 tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py
create mode 100644 tests/unit/methods/__init__.py
create mode 100644 tests/unit/methods/test_action_signature_validator.py
create mode 100644 tests/unit/nodeDefinitions/test_trustee_schema_compliance.py
create mode 100644 tests/unit/scripts/__init__.py
create mode 100644 tests/unit/scripts/test_migrate_feature_instance_refs.py
create mode 100644 tests/unit/serviceAgent/test_action_tool_adapter_typed.py
create mode 100644 tests/unit/teamsbot/__init__.py
create mode 100644 tests/unit/teamsbot/test_directorPrompts.py
create mode 100644 tests/unit/workflows/test_featureInstanceRefMigration.py
diff --git a/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json b/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
index 8a5a7f60..78f50751 100644
--- a/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
+++ b/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json
@@ -37,7 +37,8 @@
"y": 200,
"title": "Pro Scan-Dokument",
"parameters": {
- "level": 1,
+ "items": {"type": "ref", "nodeId": "n2", "path": ["files"]},
+ "level": "auto",
"concurrency": 1
}
},
diff --git a/env_dev.env b/env_dev.env
index 4f1c7367..60bc5511 100644
--- a/env_dev.env
+++ b/env_dev.env
@@ -77,7 +77,7 @@ Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJK
# Teamsbot Browser Bot Service
# For local testing: run the bot locally with `npm run dev` in service-teams-browser-bot
# The bot will connect back to localhost:8000 via WebSocket
-TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io
+TEAMSBOT_BROWSER_BOT_URL = http://localhost:4100
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = True
diff --git a/modules/datamodels/datamodelWorkflowActions.py b/modules/datamodels/datamodelWorkflowActions.py
index 09c07c14..e82941f6 100644
--- a/modules/datamodels/datamodelWorkflowActions.py
+++ b/modules/datamodels/datamodelWorkflowActions.py
@@ -22,9 +22,24 @@ class WorkflowActionParameter(BaseModel):
json_schema_extra={"label": "Name"},
)
type: str = Field(
- description="Python type as string: 'str', 'int', 'bool', 'List[str]', etc.",
+ description=(
+ "Type reference. Either a primitive ('str', 'int', 'bool', 'float', 'Any', "
+ "'List[str]', 'Dict[str,Any]', …) or a PORT_TYPE_CATALOG schema name "
+ "(e.g. 'ConnectionRef', 'FeatureInstanceRef', 'DocumentList', "
+ "'TrusteeProcessResult'). Catalog types are validated by "
+ "_actionSignatureValidator at startup."
+ ),
json_schema_extra={"label": "Typ"},
)
+ uiHint: Optional[str] = Field(
+ None,
+ description=(
+ "Optional UI rendering hint for adapters. "
+ "Free-form (e.g. 'textarea', 'cron', 'fieldBuilder'). "
+ "Adapters can override; defaults derive from frontendType when absent."
+ ),
+ json_schema_extra={"label": "UI-Hinweis"},
+ )
frontendType: FrontendType = Field(
description="UI rendering type (from global FrontendType enum)",
json_schema_extra={"label": "Frontend-Typ"},
@@ -80,6 +95,16 @@ class WorkflowActionDefinition(BaseModel):
description="Parameter schema definitions",
json_schema_extra={"label": "Parameter"},
)
+ outputType: str = Field(
+ "ActionResult",
+ description=(
+ "PORT_TYPE_CATALOG schema name produced by this action "
+ "(e.g. 'TrusteeProcessResult', 'EmailDraft', 'DocumentList'). "
+ "Defaults to 'ActionResult' for fire-and-forget actions. "
+ "Validated by _actionSignatureValidator at startup."
+ ),
+ json_schema_extra={"label": "Ausgabe-Typ"},
+ )
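+    # Example (sketch; constructor kwargs beyond those in this hunk are
+    # assumed):
+    #
+    #   WorkflowActionParameter(
+    #       name="documentList",
+    #       type="DocumentList",      # PORT_TYPE_CATALOG schema name
+    #       frontendType=...,         # any FrontendType member
+    #   )
+    #
+    # An unknown catalog name here, or in outputType above, fails
+    # _actionSignatureValidator at startup rather than surfacing at runtime.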
execute: Optional[Callable] = Field(
None,
description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically.",
diff --git a/modules/features/graphicalEditor/adapterValidator.py b/modules/features/graphicalEditor/adapterValidator.py
new file mode 100644
index 00000000..7f760896
--- /dev/null
+++ b/modules/features/graphicalEditor/adapterValidator.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Adapter Validator — enforces 5 drift rules between Schicht-3 NodeAdapters
+and the Schicht-2 Actions they bind to.
+
+This is the CI-safety net described in the typed-action-architecture plan:
+any drift between an Editor-Node Adapter and the underlying Action signature
+must be caught at build time, never silently in production.
+
+Rules
+-----
+1. Every `userParams[].actionArg` exists as a parameter in the bound Action.
+2. Every required Action parameter is covered by either `userParams` or
+ `contextParams` (i.e. no required arg is silently unset).
+3. Every Action parameter type exists in PORT_TYPE_CATALOG (or is a primitive).
+4. The Action `outputType` exists in PORT_TYPE_CATALOG (or is a primitive).
+5. Every Action with dynamicMode=False is bound by at least one Editor node
+   (reported as a warning: agent-only actions may set dynamicMode=True).
+
+Rules 3+4 are already enforced by `_actionSignatureValidator` in Phase 2;
+this module composes with it so the report covers both layers.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Mapping
+
+from modules.features.graphicalEditor.nodeAdapter import (
+ NodeAdapter,
+ _adapterFromLegacyNode,
+ _isMethodBoundNode,
+)
+from modules.workflows.methods._actionSignatureValidator import _validateTypeRef
+
+
+@dataclass
+class AdapterValidationReport:
+ """Aggregated drift report across all adapters."""
+
+ errors: List[str] = field(default_factory=list)
+ warnings: List[str] = field(default_factory=list)
+
+ @property
+ def isHealthy(self) -> bool:
+ return not self.errors
+
+ def merge(self, other: "AdapterValidationReport") -> None:
+ self.errors.extend(other.errors)
+ self.warnings.extend(other.warnings)
+
+
+def _validateAdapterAgainstAction(
+ adapter: NodeAdapter,
+ actionDef: Any,
+) -> AdapterValidationReport:
+ """Apply rules 1-4 to a single Adapter / Action pair.
+
+ `actionDef` is duck-typed so tests can pass dataclasses; production passes
+ a `WorkflowActionDefinition` Pydantic model.
+ """
+ report = AdapterValidationReport()
+ actionParams: Mapping[str, Any] = getattr(actionDef, "parameters", {}) or {}
+ outputType: str = getattr(actionDef, "outputType", "ActionResult") or "ActionResult"
+
+ # Rule 1: every userParam.actionArg exists in the Action
+ declaredArgs = {up.actionArg for up in adapter.userParams}
+ for arg in declaredArgs:
+ if arg not in actionParams:
+ report.errors.append(
+ f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
+ f"userParams.actionArg '{arg}' does not exist in action parameters "
+ f"(known: {sorted(actionParams.keys())})"
+ )
+
+ # Rule 2: every required Action arg is covered (userParams OR contextParams)
+ coveredArgs = declaredArgs | set(adapter.contextParams.keys())
+ for paramName, paramDef in actionParams.items():
+ isRequired = bool(getattr(paramDef, "required", False))
+ if isRequired and paramName not in coveredArgs:
+ report.errors.append(
+ f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
+ f"required action arg '{paramName}' is neither in userParams nor contextParams"
+ )
+
+ # Rule 3: every Action parameter type exists in catalog (re-runs Phase-2 rule)
+ for paramName, paramDef in actionParams.items():
+ typeRef = getattr(paramDef, "type", None)
+ if not typeRef:
+ report.errors.append(
+ f"action '{adapter.bindsAction}.{paramName}': missing 'type' on parameter"
+ )
+ continue
+ for err in _validateTypeRef(typeRef):
+ report.errors.append(
+ f"action '{adapter.bindsAction}.{paramName}': {err}"
+ )
+
+ # Rule 4: Action outputType exists in catalog (or is a generic fire-and-forget type)
+ if outputType not in {"ActionResult", "Transit"}:
+ for err in _validateTypeRef(outputType):
+ report.errors.append(
+ f"action '{adapter.bindsAction}'.outputType: {err}"
+ )
+
+ return report
+
+
+def _validateAllAdapters(
+ staticNodes: List[Mapping[str, Any]],
+ actionsRegistry: Mapping[str, Mapping[str, Any]],
+) -> AdapterValidationReport:
+ """Run rules 1-5 across all method-bound static node definitions.
+
+ Args:
+ staticNodes: list of legacy node-dicts (`STATIC_NODE_TYPES`).
+ actionsRegistry: mapping of method-shortname -> {actionName: WorkflowActionDefinition}.
+ Built from live `methods` registry or test-stubbed methods.
+
+ Returns:
+        Aggregated drift report. `isHealthy` is True only if every method-bound
+        node has a matching Action and rules 1-4 pass; rule 5 findings are
+        warnings and do not affect `isHealthy`.
+ """
+ report = AdapterValidationReport()
+
+ for node in staticNodes:
+ if not _isMethodBoundNode(node):
+ continue
+
+ adapter = _adapterFromLegacyNode(node)
+ if adapter is None:
+ report.errors.append(
+ f"node '{node.get('id')}' is method-bound but adapter projection failed"
+ )
+ continue
+
+ methodName = str(node.get("_method") or "")
+ actionName = str(node.get("_action") or "")
+ methodActions = actionsRegistry.get(methodName) or {}
+ actionDef = methodActions.get(actionName)
+ if actionDef is None:
+ report.errors.append(
+ f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
+ f"action not found in registry (method '{methodName}' has actions: "
+ f"{sorted(methodActions.keys())})"
+ )
+ continue
+
+ report.merge(_validateAdapterAgainstAction(adapter, actionDef))
+
+ # Rule 5: every Action with dynamicMode=False MUST have an Editor Adapter.
+ # dynamicMode=True actions are agent-only and may legitimately lack one.
+ boundActions: set[str] = set()
+ for node in staticNodes:
+ if not _isMethodBoundNode(node):
+ continue
+ boundActions.add(f"{node.get('_method')}.{node.get('_action')}")
+
+ for methodName, actions in actionsRegistry.items():
+ for actionName, actionDef in actions.items():
+ if bool(getattr(actionDef, "dynamicMode", False)):
+ continue
+ fqn = f"{methodName}.{actionName}"
+ if fqn not in boundActions:
+ report.warnings.append(
+ f"action '{fqn}' has no Editor adapter "
+ f"(set dynamicMode=True if intended as agent-only)"
+ )
+
+ return report
+
+
+def _formatAdapterReport(report: AdapterValidationReport) -> str:
+ """Format a report for human-readable logging."""
+ lines: List[str] = []
+ if report.isHealthy and not report.warnings:
+ lines.append("Adapter validator: all healthy.")
+ return "\n".join(lines)
+
+ if report.errors:
+ lines.append(f"Adapter validator: {len(report.errors)} ERROR(s)")
+ for e in report.errors:
+ lines.append(f" ERROR: {e}")
+ if report.warnings:
+ lines.append(f"Adapter validator: {len(report.warnings)} WARNING(s)")
+ for w in report.warnings:
+ lines.append(f" WARN: {w}")
+ return "\n".join(lines)
+
+
+def _buildActionsRegistryFromMethods(
+ methodInstances: Mapping[str, Any],
+) -> Dict[str, Dict[str, Any]]:
+ """Convenience: turn `{shortName: methodInstance}` into the registry shape.
+
+ `methodInstance._actions` is a dict of action-name -> WorkflowActionDefinition.
+ """
+ registry: Dict[str, Dict[str, Any]] = {}
+ for shortName, instance in methodInstances.items():
+ actions = getattr(instance, "_actions", None)
+ if isinstance(actions, dict):
+ registry[shortName] = dict(actions)
+ return registry
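+
+
+# Usage sketch. Startup wiring is not part of this module; `methods` and
+# `STATIC_NODE_TYPES` are the live registries named in the docstrings above,
+# and the RuntimeError policy is an assumption:
+#
+#     registry = _buildActionsRegistryFromMethods(methods)
+#     report = _validateAllAdapters(STATIC_NODE_TYPES, registry)
+#     logging.getLogger(__name__).info(_formatAdapterReport(report))
+#     if not report.isHealthy:
+#         raise RuntimeError("adapter drift detected; see report above")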
diff --git a/modules/features/graphicalEditor/nodeAdapter.py b/modules/features/graphicalEditor/nodeAdapter.py
new file mode 100644
index 00000000..ed7ec711
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeAdapter.py
@@ -0,0 +1,172 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Schicht-3 Adapter Layer — projects Schicht-2 Actions into Editor-Node form.
+
+Architecture (see wiki/c-work/1-plan/2026-04-typed-action-architecture.md):
+ - Schicht 1: Types Catalog (portTypes.PORT_TYPE_CATALOG)
+ - Schicht 2: Methods/Actions (modules/workflows/methods/method*) - source of truth
+ for Backend capabilities (parameter types, output types).
+ - Schicht 3: Adapters (this module) - Editor-Node + AI-Agent-Tool wrappers around
+ Actions. References Action signature, never duplicates types.
+ - Schicht 4: Workflow-Bindings + Agent-Tool-Calls (instance-level wiring).
+
+This module defines the in-code Adapter representation (NodeAdapter,
+UserParamMapping) and the projection helpers that convert between the
+legacy node-dict wire format and the typed Adapter view.
+
+Wire-format compatibility: the legacy dicts in nodeDefinitions/*.py remain
+the wire format consumed by the frontend until Phase 4. This module exposes
+an Adapter VIEW over those dicts so the validator and AI-tool generator can
+operate on a clean, typed structure without breaking consumers.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Mapping, Optional
+
+
+@dataclass(frozen=True)
+class UserParamMapping:
+ """Maps an Action argument into a Node's user-facing parameter.
+
+ The Action signature is the source of truth for type/required/description.
+ This mapping carries Editor-specific overrides (label, UI hints, conditional
+ visibility) but never re-declares the type.
+ """
+
+ actionArg: str
+ label: Optional[Any] = None
+ description: Optional[Any] = None
+ uiHint: Optional[str] = None
+ frontendOptions: Optional[Any] = None
+ visibleWhen: Optional[Dict[str, Any]] = None
+ defaultValue: Any = None
+
+
+@dataclass(frozen=True)
+class NodeAdapter:
+ """Schicht-3 Editor-Node adapter — binds to a Schicht-2 Action.
+
+ All type information for `userParams` is inherited from the bound Action.
+ The adapter only carries Editor-specific concerns (UI labels, port topology,
+ icon/color metadata).
+ """
+
+ nodeId: str
+ bindsAction: str
+ category: str
+ label: Any
+ description: Any
+ userParams: List[UserParamMapping] = field(default_factory=list)
+ contextParams: Dict[str, str] = field(default_factory=dict)
+ inputs: int = 1
+ outputs: int = 1
+ inputAccepts: List[List[str]] = field(default_factory=list)
+ outputLabels: Optional[List[Any]] = None
+ meta: Dict[str, Any] = field(default_factory=dict)
+
+
+def _isMethodBoundNode(node: Mapping[str, Any]) -> bool:
+ """True if a legacy node dict is bound to a Schicht-2 Action."""
+ return bool(node.get("_method") and node.get("_action"))
+
+
+def _bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]:
+ """Build the canonical 'method.action' identifier from a legacy node dict.
+
+ Returns None for framework-primitive nodes (trigger/flow/input/data).
+ """
+ method = node.get("_method")
+ action = node.get("_action")
+ if not method or not action:
+ return None
+ return f"{method}.{action}"
+
+
+def _userParamFromLegacyParam(legacyParam: Mapping[str, Any]) -> UserParamMapping:
+ """Project a legacy parameter dict into a UserParamMapping view.
+
+ The view carries only Editor-overrides; type/required come from the Action.
+ """
+ return UserParamMapping(
+ actionArg=str(legacyParam.get("name", "")),
+ label=legacyParam.get("label"),
+ description=legacyParam.get("description"),
+ uiHint=legacyParam.get("frontendType"),
+ frontendOptions=legacyParam.get("frontendOptions"),
+ visibleWhen=_extractVisibleWhen(legacyParam.get("frontendOptions")),
+ defaultValue=legacyParam.get("default"),
+ )
+
+
+def _extractVisibleWhen(frontendOptions: Any) -> Optional[Dict[str, Any]]:
+ """Extract conditional-visibility hint from legacy frontendOptions.showWhen."""
+ if not isinstance(frontendOptions, dict):
+ return None
+ dependsOn = frontendOptions.get("dependsOn")
+ showWhen = frontendOptions.get("showWhen")
+ if not dependsOn or not showWhen:
+ return None
+ return {"actionArg": str(dependsOn), "in": list(showWhen) if isinstance(showWhen, (list, tuple)) else [showWhen]}
+
+
+def _adapterFromLegacyNode(node: Mapping[str, Any]) -> Optional[NodeAdapter]:
+ """Build a NodeAdapter view from a legacy node dict.
+
+ Returns None for framework-primitive nodes (no _method/_action binding).
+ Pure projection — no validation, no Action-signature lookup.
+ """
+ if not _isMethodBoundNode(node):
+ return None
+
+ bindsAction = _bindsActionFromLegacy(node)
+ if not bindsAction:
+ return None
+
+ inputAccepts = _projectInputAccepts(node)
+
+ return NodeAdapter(
+ nodeId=str(node.get("id", "")),
+ bindsAction=bindsAction,
+ category=str(node.get("category", "")),
+ label=node.get("label", ""),
+ description=node.get("description", ""),
+ userParams=[_userParamFromLegacyParam(p) for p in (node.get("parameters") or [])],
+ contextParams={},
+ inputs=int(node.get("inputs", 1)),
+ outputs=int(node.get("outputs", 1)),
+ inputAccepts=inputAccepts,
+ outputLabels=node.get("outputLabels"),
+ meta=dict(node.get("meta") or {}),
+ )
+
+
+def _projectInputAccepts(node: Mapping[str, Any]) -> List[List[str]]:
+ """Convert legacy `inputPorts` dict-of-dicts into a per-port `accepts` list."""
+ inputPorts = node.get("inputPorts") or {}
+ if not isinstance(inputPorts, dict):
+ return []
+ inputs = int(node.get("inputs", 0) or 0)
+ if inputs <= 0:
+ return []
+ out: List[List[str]] = []
+ for portIdx in range(inputs):
+ portCfg = inputPorts.get(portIdx) or inputPorts.get(str(portIdx)) or {}
+ accepts = portCfg.get("accepts") if isinstance(portCfg, dict) else None
+ out.append(list(accepts) if isinstance(accepts, (list, tuple)) else [])
+ return out
+
+
+def _projectAllAdapters(staticNodes: List[Mapping[str, Any]]) -> Dict[str, NodeAdapter]:
+ """Project a list of legacy node dicts into a {nodeId: NodeAdapter} map.
+
+ Framework-primitive nodes (no Action binding) are silently skipped.
+ """
+ out: Dict[str, NodeAdapter] = {}
+ for node in staticNodes:
+ adapter = _adapterFromLegacyNode(node)
+ if adapter is not None:
+ out[adapter.nodeId] = adapter
+ return out
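+
+
+# Projection example (sketch; 'demo.send' is a made-up node, not in the
+# registry):
+#
+#     node = {"id": "demo.send", "category": "demo", "label": "Send",
+#             "description": "", "inputs": 1, "outputs": 1,
+#             "inputPorts": {0: {"accepts": ["Transit"]}},
+#             "parameters": [{"name": "to", "frontendType": "text"}],
+#             "_method": "demo", "_action": "send"}
+#     adapter = _adapterFromLegacyNode(node)
+#     assert adapter.bindsAction == "demo.send"
+#     assert adapter.userParams[0].actionArg == "to"
+#     assert adapter.inputAccepts == [["Transit"]]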
diff --git a/modules/features/graphicalEditor/nodeDefinitions/ai.py b/modules/features/graphicalEditor/nodeDefinitions/ai.py
index dce86056..d0e0eb22 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/ai.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/ai.py
@@ -12,19 +12,19 @@ AI_NODES = [
"parameters": [
{"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
"description": t("KI-Prompt")},
- {"name": "outputFormat", "type": "string", "required": False, "frontendType": "select",
- "frontendOptions": {"options": ["text", "json", "emailDraft"]},
- "description": t("Ausgabeformat"), "default": "text"},
+ {"name": "resultType", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["txt", "json", "md", "csv", "xml", "html", "pdf", "docx", "xlsx", "pptx", "png", "jpg"]},
+ "description": t("Ausgabeformat"), "default": "txt"},
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
- {"name": "context", "type": "string", "required": False, "frontendType": "hidden",
- "description": t("Kontext-Daten (via Wire oder DataRef)"), "default": ""},
{"name": "simpleMode", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": t("Einfacher Modus"), "default": True},
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["DocumentList", "AiResult", "TextResult", "Transit"]}},
+ "inputPorts": {0: {"accepts": [
+ "DocumentList", "AiResult", "TextResult", "Transit", "LoopItem", "ActionResult",
+ ]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-robot", "color": "#9C27B0", "usesAi": True},
"_method": "ai",
@@ -53,9 +53,11 @@ AI_NODES = [
"label": t("Dokument zusammenfassen"),
"description": t("Dokumentinhalt zusammenfassen"),
"parameters": [
+ {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
{"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
- "frontendOptions": {"options": ["short", "medium", "long"]},
- "description": t("Kurz, mittel oder lang"), "default": "medium"},
+ "frontendOptions": {"options": ["brief", "medium", "detailed"]},
+ "description": t("Kurz, mittel oder ausführlich"), "default": "medium"},
],
"inputs": 1,
"outputs": 1,
@@ -71,9 +73,10 @@ AI_NODES = [
"label": t("Dokument übersetzen"),
"description": t("Dokument in Zielsprache übersetzen"),
"parameters": [
- {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select",
- "frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]},
- "description": t("Zielsprache")},
+ {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
+ {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Zielsprache (z.B. de, en, French)")},
],
"inputs": 1,
"outputs": 1,
@@ -89,8 +92,10 @@ AI_NODES = [
"label": t("Dokument konvertieren"),
"description": t("Dokument in anderes Format konvertieren"),
"parameters": [
+ {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
{"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
- "frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]},
+ "frontendOptions": {"options": ["docx", "pdf", "xlsx", "csv", "txt", "html", "json", "md"]},
"description": t("Zielformat")},
],
"inputs": 1,
@@ -126,9 +131,9 @@ AI_NODES = [
"parameters": [
{"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
"description": t("Code-Generierungs-Prompt")},
- {"name": "language", "type": "string", "required": False, "frontendType": "select",
- "frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]},
- "description": t("Programmiersprache"), "default": "python"},
+ {"name": "resultType", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["py", "js", "ts", "html", "java", "cpp", "txt", "json", "csv", "xml"]},
+ "description": t("Datei-Endung der erzeugten Code-Datei"), "default": "py"},
],
"inputs": 1,
"outputs": 1,
diff --git a/modules/features/graphicalEditor/nodeDefinitions/clickup.py b/modules/features/graphicalEditor/nodeDefinitions/clickup.py
index 210fe7f7..56b27984 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/clickup.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/clickup.py
@@ -94,8 +94,6 @@ CLICKUP_NODES = [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "clickup"},
"description": t("ClickUp-Verbindung")},
- {"name": "teamId", "type": "string", "required": False, "frontendType": "text",
- "description": t("Workspace")},
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Pfad zur Liste")},
@@ -144,10 +142,8 @@ CLICKUP_NODES = [
"description": t("Task-ID")},
{"name": "path", "type": "string", "required": False, "frontendType": "text",
"description": t("Oder Pfad")},
- {"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows",
- "description": t("Zu ändernde Felder")},
{"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
- "description": t("JSON für API")},
+ "description": t("JSON-Body für PUT /task/{id}, z.B. {\"name\":\"...\",\"status\":\"...\"}")},
],
"inputs": 1,
"outputs": 1,
@@ -172,6 +168,8 @@ CLICKUP_NODES = [
"description": t("Oder Pfad")},
{"name": "fileName", "type": "string", "required": False, "frontendType": "text",
"description": t("Dateiname")},
+ {"name": "content", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
],
"inputs": 1,
"outputs": 1,
diff --git a/modules/features/graphicalEditor/nodeDefinitions/context.py b/modules/features/graphicalEditor/nodeDefinitions/context.py
index b677dca6..81d878be 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/context.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/context.py
@@ -10,14 +10,13 @@ CONTEXT_NODES = [
"label": t("Inhalt extrahieren"),
"description": t("Dokumentstruktur extrahieren ohne KI (Seiten, Abschnitte, Bilder, Tabellen)"),
"parameters": [
- {"name": "outputDetail", "type": "string", "required": False, "frontendType": "select",
- "frontendOptions": {"options": ["full", "structure", "references"]},
- "description": t("Detailgrad: full = alles, structure = Skelett, references = Dateireferenzen"),
- "default": "full"},
- {"name": "includeImages", "type": "boolean", "required": False, "frontendType": "checkbox",
- "description": t("Bilder extrahieren"), "default": True},
- {"name": "includeTables", "type": "boolean", "required": False, "frontendType": "checkbox",
- "description": t("Tabellen extrahieren"), "default": True},
+ {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
+ {"name": "extractionOptions", "type": "object", "required": False, "frontendType": "json",
+ "description": t(
+ "Extraktions-Optionen (JSON), z.B. {\"includeImages\": true, \"includeTables\": true, "
+ "\"outputDetail\": \"full\"}"),
+ "default": {}},
],
"inputs": 1,
"outputs": 1,
diff --git a/modules/features/graphicalEditor/nodeDefinitions/data.py b/modules/features/graphicalEditor/nodeDefinitions/data.py
index 73552928..b6208840 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/data.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/data.py
@@ -16,27 +16,11 @@ DATA_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["Transit"]}},
+ "inputPorts": {0: {"accepts": ["Transit", "AiResult", "LoopItem"]}},
"outputPorts": {0: {"schema": "AggregateResult"}},
"executor": "data",
"meta": {"icon": "mdi-playlist-plus", "color": "#607D8B", "usesAi": False},
},
- {
- "id": "data.transform",
- "category": "data",
- "label": t("Umwandeln"),
- "description": t("Daten umstrukturieren"),
- "parameters": [
- {"name": "mappings", "type": "json", "required": True, "frontendType": "mappingTable",
- "description": t("Feld-Zuordnungen"), "default": []},
- ],
- "inputs": 1,
- "outputs": 1,
- "inputPorts": {0: {"accepts": ["Transit"]}},
- "outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}},
- "executor": "data",
- "meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B", "usesAi": False},
- },
{
"id": "data.filter",
"category": "data",
diff --git a/modules/features/graphicalEditor/nodeDefinitions/email.py b/modules/features/graphicalEditor/nodeDefinitions/email.py
index 30872815..11ff9895 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/email.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/email.py
@@ -17,14 +17,8 @@ EMAIL_NODES = [
"description": t("Ordner"), "default": "Inbox"},
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
"description": t("Max E-Mails"), "default": 100},
- {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
- "description": t("Nur von dieser Adresse"), "default": ""},
- {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
- "description": t("Betreff muss enthalten"), "default": ""},
- {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
- "description": t("Nur mit Anhängen"), "default": False},
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
- "description": t("Erweitert: Filter-Text"), "default": ""},
+ "description": t("Filter-Ausdruck (z.B. 'from:max@example.com hasAttachment:true betreff')"), "default": ""},
],
"inputs": 1,
"outputs": 1,
@@ -43,24 +37,12 @@ EMAIL_NODES = [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"},
"description": t("E-Mail-Konto Verbindung")},
- {"name": "query", "type": "string", "required": False, "frontendType": "text",
- "description": t("Suchbegriff"), "default": ""},
+ {"name": "query", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Suchausdruck (z.B. 'from:max@example.com hasAttachments:true Rechnung')")},
{"name": "folder", "type": "string", "required": False, "frontendType": "text",
- "description": t("Ordner"), "default": "Inbox"},
+ "description": t("Ordner"), "default": "All"},
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
"description": t("Max E-Mails"), "default": 100},
- {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
- "description": t("Von Adresse"), "default": ""},
- {"name": "toAddress", "type": "string", "required": False, "frontendType": "text",
- "description": t("An Adresse"), "default": ""},
- {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
- "description": t("Betreff enthält"), "default": ""},
- {"name": "bodyContains", "type": "string", "required": False, "frontendType": "text",
- "description": t("Inhalt enthält"), "default": ""},
- {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
- "description": t("Mit Anhängen"), "default": False},
- {"name": "filter", "type": "string", "required": False, "frontendType": "text",
- "description": t("Erweitert: KQL-Filter"), "default": ""},
],
"inputs": 1,
"outputs": 1,
@@ -74,22 +56,24 @@ EMAIL_NODES = [
"id": "email.draftEmail",
"category": "email",
"label": t("E-Mail entwerfen"),
- "description": t("E-Mail-Entwurf erstellen"),
+ "description": t(
+ "AI-gestützt einen E-Mail-Entwurf aus Kontext und optionalen Dokumenten erstellen"),
"parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"},
"description": t("E-Mail-Konto")},
- {"name": "subject", "type": "string", "required": True, "frontendType": "text",
- "description": t("Betreff")},
- {"name": "body", "type": "string", "required": True, "frontendType": "textarea",
- "description": t("Inhalt")},
+ {"name": "context", "type": "string", "required": False, "frontendType": "textarea",
+ "description": t("Kontext / Brief-Beschreibung für die KI-Komposition"), "default": ""},
{"name": "to", "type": "string", "required": False, "frontendType": "text",
- "description": t("Empfänger"), "default": ""},
- {"name": "attachments", "type": "json", "required": False, "frontendType": "attachmentBuilder",
- "description": t(
- "Anhänge: Liste von { contentRef | csvFromVariable | base64Content, name, mimeType }. "
- "Per Wire befüllbar (z.B. CSV aus data.consolidate)."),
- "default": []},
+ "description": t("Empfänger (komma-separiert, optional für Entwurf)"), "default": ""},
+ {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
+ "description": t("Anhang-Dokumente (via Wire oder DataRef)"), "default": ""},
+ {"name": "emailContent", "type": "string", "required": False, "frontendType": "hidden",
+ "description": t("Direkt vorbereiteter Inhalt {subject, body, to} (via Wire — überspringt KI)"),
+ "default": ""},
+ {"name": "emailStyle", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["formal", "casual", "business"]},
+ "description": t("Stil"), "default": "business"},
],
"inputs": 1,
"outputs": 1,
diff --git a/modules/features/graphicalEditor/nodeDefinitions/flow.py b/modules/features/graphicalEditor/nodeDefinitions/flow.py
index be5f5a43..04a44197 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/flow.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/flow.py
@@ -88,7 +88,9 @@ FLOW_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["Transit", "UdmDocument"]}},
+ "inputPorts": {0: {"accepts": [
+ "Transit", "UdmDocument", "EmailList", "DocumentList", "FileList", "TaskList", "ActionResult",
+ ]}},
"outputPorts": {0: {"schema": "LoopItem"}},
"executor": "flow",
"meta": {"icon": "mdi-repeat", "color": "#FF9800", "usesAi": False},
diff --git a/modules/features/graphicalEditor/nodeDefinitions/input.py b/modules/features/graphicalEditor/nodeDefinitions/input.py
index e6d88c6b..647e9ac2 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/input.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/input.py
@@ -22,7 +22,7 @@ INPUT_NODES = [
"inputs": 1,
"outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
- "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}},
+ "outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "fields"}}},
"executor": "input",
"meta": {"icon": "mdi-form-textbox", "color": "#9C27B0", "usesAi": False},
},
diff --git a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py
index 1faa6bbb..7e52ef8d 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py
@@ -43,7 +43,7 @@ SHAREPOINT_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
+ "inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-document", "color": "#0078D4", "usesAi": False},
"_method": "sharepoint",
@@ -61,6 +61,8 @@ SHAREPOINT_NODES = [
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Zielordner-Pfad")},
+ {"name": "content", "type": "string", "required": True, "frontendType": "hidden",
+ "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
],
"inputs": 1,
"outputs": 1,
@@ -106,7 +108,7 @@ SHAREPOINT_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
+ "inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-download", "color": "#0078D4", "usesAi": False},
"_method": "sharepoint",
diff --git a/modules/features/graphicalEditor/nodeDefinitions/triggers.py b/modules/features/graphicalEditor/nodeDefinitions/triggers.py
index d4122527..7b55d5d7 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/triggers.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/triggers.py
@@ -34,7 +34,7 @@ TRIGGER_NODES = [
"inputs": 0,
"outputs": 1,
"inputPorts": {},
- "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}},
+ "outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "formFields"}}},
"executor": "trigger",
"meta": {"icon": "mdi-form-select", "color": "#9C27B0", "usesAi": False},
},
diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
index 0eb5e119..5f7de2b2 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/trustee.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
@@ -46,8 +46,11 @@ TRUSTEE_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
- "outputPorts": {0: {"schema": "DocumentList"}},
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit", "AiResult", "LoopItem", "ActionResult"]}},
+ # Runtime returns ActionResult.isSuccess(documents=[...]) — see
+ # actions/extractFromFiles.py. Declaring DocumentList here was adapter
+ # drift and broke the DataPicker for downstream nodes.
+ "outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50", "usesAi": True},
"_method": "trustee",
"_action": "extractFromFiles",
@@ -58,14 +61,17 @@ TRUSTEE_NODES = [
"label": t("Dokumente verarbeiten"),
"description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."),
"parameters": [
- {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
- "description": t("Automatisch via Wire-Verbindung befüllt")},
+ # Type matches what producers actually emit: ActionResult.documents
+ # is `List[ActionDocument]` (see datamodelChat.ActionResult). The
+ # DataPicker uses this string to filter compatible upstream paths.
+ {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
+ "description": t("Dokumentenliste eines Upstream-Producers (z.B. trustee.extractFromFiles → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": t("Trustee Feature-Instanz-ID")},
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-file-document-check", "color": "#4CAF50", "usesAi": False},
"_method": "trustee",
@@ -77,14 +83,17 @@ TRUSTEE_NODES = [
"label": t("In Buchhaltung synchronisieren"),
"description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
"parameters": [
- {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
- "description": t("Automatisch via Wire-Verbindung befüllt")},
+ # Type matches what producers actually emit: ActionResult.documents
+ # is `List[ActionDocument]` (see datamodelChat.ActionResult). The
+ # DataPicker uses this string to filter compatible upstream paths.
+ {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
+ "description": t("Verarbeitete Dokumentenliste eines Upstream-Producers (z.B. trustee.processDocuments → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": t("Trustee Feature-Instanz-ID")},
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["Transit"]}},
+ "inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-calculator", "color": "#4CAF50", "usesAi": False},
"_method": "trustee",
@@ -122,7 +131,7 @@ TRUSTEE_NODES = [
],
"inputs": 1,
"outputs": 1,
- "inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult"]}},
+ "inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult", "UdmDocument"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-database-search", "color": "#4CAF50", "usesAi": False},
"_method": "trustee",
diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py
index 577b530f..dd302282 100644
--- a/modules/features/graphicalEditor/nodeRegistry.py
+++ b/modules/features/graphicalEditor/nodeRegistry.py
@@ -6,9 +6,10 @@ Nodes are defined first; IO/method actions are used at execution time.
"""
import logging
-from typing import Dict, List, Any
+from typing import Dict, List, Any, Optional
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.nodeAdapter import _bindsActionFromLegacy
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
@@ -41,12 +42,21 @@ def _pickFromLangMap(d: Any, lang: str) -> Any:
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
- """Apply request language via resolveText (t() keys + multilingual dicts)."""
+ """Apply request language via resolveText (t() keys + multilingual dicts).
+
+ Also exposes Schicht-3 metadata (`bindsAction`) derived from the legacy
+ `_method`/`_action` pair, so frontend consumers can resolve back to the
+ Schicht-2 Action signature without parsing internal underscore-prefixed
+ fields.
+ """
lang = normalizePrimaryLanguageTag(language, "en")
+ bindsAction = _bindsActionFromLegacy(node)
out = dict(node)
for key in list(out.keys()):
if key.startswith("_"):
del out[key]
+ if bindsAction:
+ out["bindsAction"] = bindsAction
lbl = node.get("label")
if lbl is not None:
out["label"] = resolveText(lbl, lang) or node.get("id", "")
@@ -124,3 +134,46 @@ def getNodeTypeToMethodAction() -> Dict[str, tuple]:
if method and action:
mapping[node["id"]] = (method, action)
return mapping
+
+
+def validateAdaptersAgainstMethods(methodInstances: Optional[Dict[str, Any]] = None) -> Optional[str]:
+ """Run the Schicht-3 Adapter validator (5 drift rules) against the live methods.
+
+ Intended to be called once at startup after methodDiscovery has populated
+    the methods registry. Returns a human-readable report (None when healthy
+    and warning-free) so the caller decides whether to log, raise, or surface
+    to operators.
+
+ Pass `methodInstances` directly for testability; defaults to importing
+ the live registry from `methodDiscovery.methods`.
+ """
+ from modules.features.graphicalEditor.adapterValidator import (
+ _buildActionsRegistryFromMethods,
+ _formatAdapterReport,
+ _validateAllAdapters,
+ )
+
+ if methodInstances is None:
+ try:
+ from modules.workflows.processing.shared.methodDiscovery import methods
+ except Exception as exc:
+ logger.warning("Adapter validator skipped: cannot import methodDiscovery (%s)", exc)
+ return None
+
+ methodInstances = {}
+ for fullName, info in (methods or {}).items():
+ shortName = fullName.replace("Method", "").lower() if fullName[:1].isupper() else fullName
+ instance = info.get("instance") if isinstance(info, dict) else None
+ if instance is not None:
+ methodInstances[shortName] = instance
+
+ if not methodInstances:
+ return None
+
+    actionsRegistry = _buildActionsRegistryFromMethods(methodInstances)
+    report = _validateAllAdapters(list(STATIC_NODE_TYPES), actionsRegistry)
+    formatted = _formatAdapterReport(report)
+    if not report.isHealthy:
+        logger.warning("[adapterValidator] %s", formatted)
+        return formatted
+    if report.warnings:
+        logger.info("[adapterValidator] %s", formatted)
+        return formatted
+    return None
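+
+
+# Startup wiring sketch. The actual app factory / lifespan hook is not part
+# of this patch; `onAppStartup` and `STRICT_ADAPTER_CHECKS` are assumed
+# integration points, shown only to illustrate the intended call order.
+#
+#     def onAppStartup() -> None:
+#         # methodDiscovery must have populated its registry before this runs
+#         report = validateAdaptersAgainstMethods()
+#         if report and STRICT_ADAPTER_CHECKS:  # assumed config flag
+#             raise RuntimeError(f"Adapter drift detected:\n{report}")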
diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py
index 1ac90665..b607316a 100644
--- a/modules/features/graphicalEditor/portTypes.py
+++ b/modules/features/graphicalEditor/portTypes.py
@@ -4,13 +4,14 @@
Typed Port System for the Graphical Editor.
Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES,
-output normalizers, input extractors, and Transit helpers.
+output normalizers, and Transit helpers.
"""
import logging
import time
import uuid
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
@@ -25,9 +26,14 @@ logger = logging.getLogger(__name__)
class PortField(BaseModel):
name: str
- type: str # str, int, bool, List[str], List[Document], Dict[str,Any]
+ type: str # str, int, bool, List[str], List[Document], Dict[str,Any], ConnectionRef, …
description: str = ""
required: bool = True
+ enumValues: Optional[List[str]] = None
+ # Marks this field as the discriminator for a Ref-Schema (e.g. ConnectionRef.authority,
+ # FeatureInstanceRef.featureCode). Pickers/validators use it to filter compatible
+ # producers by sub-type. Type must be "str" when discriminator is True.
+ discriminator: bool = False
class PortSchema(BaseModel):
@@ -57,13 +63,113 @@ class OutputPortDef(BaseModel):
# ---------------------------------------------------------------------------
PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
+ # -----------------------------------------------------------------
+ # Refs (handles to external resources, pickable by user)
+ # -----------------------------------------------------------------
+ "ConnectionRef": PortSchema(name="ConnectionRef", fields=[
+ PortField(name="id", type="str", description="UserConnection.id (UUID)"),
+ PortField(name="authority", type="str", discriminator=True,
+ description="Auth-Provider-Code: msft | clickup | google | …"),
+ PortField(name="label", type="str", required=False, description="Anzeigename"),
+ ]),
+ "FeatureInstanceRef": PortSchema(name="FeatureInstanceRef", fields=[
+ PortField(name="id", type="str", description="FeatureInstance.id (UUID)"),
+ PortField(name="featureCode", type="str", discriminator=True,
+ description="Feature-Modul-Code: trustee | redmine | clickup | sharepoint | …"),
+ PortField(name="label", type="str", required=False, description="Anzeigename"),
+ PortField(name="mandateId", type="str", required=False, description="Zugehöriger Mandant"),
+ ]),
+ "ClickUpListRef": PortSchema(name="ClickUpListRef", fields=[
+ PortField(name="listId", type="str", description="ClickUp-Listen-ID"),
+ PortField(name="name", type="str", required=False, description="Listenname"),
+ PortField(name="spaceId", type="str", required=False, description="Space-ID"),
+ PortField(name="folderId", type="str", required=False, description="Ordner-ID"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="ClickUp-Verbindung"),
+ ]),
+ "PromptTemplateRef": PortSchema(name="PromptTemplateRef", fields=[
+ PortField(name="id", type="str", description="Prompt-Template-ID"),
+ PortField(name="name", type="str", required=False, description="Anzeigename"),
+ PortField(name="version", type="str", required=False, description="Version / Tag"),
+ ]),
+ "SharePointFolderRef": PortSchema(name="SharePointFolderRef", fields=[
+ PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"),
+ PortField(name="driveId", type="str", required=False, description="Drive ID"),
+ PortField(name="folderPath", type="str", required=False, description="Ordnerpfad"),
+ PortField(name="label", type="str", required=False, description="Kurzlabel für Picker"),
+ ]),
+ "SharePointFileRef": PortSchema(name="SharePointFileRef", fields=[
+ PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"),
+ PortField(name="driveId", type="str", required=False, description="Drive ID"),
+ PortField(name="filePath", type="str", required=False, description="Dateipfad"),
+ PortField(name="fileName", type="str", required=False, description="Dateiname"),
+ PortField(name="label", type="str", required=False, description="Kurzlabel"),
+ ]),
+ "Document": PortSchema(name="Document", fields=[
+ PortField(name="id", type="str", required=False, description="Dokument-/Datei-ID"),
+ PortField(name="name", type="str", required=False, description="Anzeigename"),
+ PortField(name="mimeType", type="str", required=False, description="MIME-Typ"),
+ PortField(name="sizeBytes", type="int", required=False, description="Grösse"),
+ PortField(name="downloadUrl", type="str", required=False, description="Download-URL"),
+ PortField(name="filePath", type="str", required=False, description="Logischer Pfad"),
+ ]),
+ "FileItem": PortSchema(name="FileItem", fields=[
+ PortField(name="id", type="str", required=False, description="Datei-ID"),
+ PortField(name="name", type="str", required=False, description="Name"),
+ PortField(name="path", type="str", required=False, description="Pfad"),
+ PortField(name="mimeType", type="str", required=False, description="MIME"),
+ PortField(name="sizeBytes", type="int", required=False, description="Grösse"),
+ ]),
+ "EmailItem": PortSchema(name="EmailItem", fields=[
+ PortField(name="id", type="str", required=False, description="Message-ID"),
+ PortField(name="subject", type="str", required=False, description="Betreff"),
+ PortField(name="fromAddress", type="str", required=False, description="Absender"),
+ PortField(name="toAddresses", type="List[str]", required=False, description="Empfänger"),
+ PortField(name="receivedAt", type="str", required=False, description="Empfangen am"),
+ PortField(name="hasAttachments", type="bool", required=False, description="Hat Anhänge"),
+ PortField(name="bodyPreview", type="str", required=False, description="Vorschau"),
+ ]),
+ "TaskItem": PortSchema(name="TaskItem", fields=[
+ PortField(name="id", type="str", required=False, description="Task-ID"),
+ PortField(name="title", type="str", required=False, description="Titel"),
+ PortField(name="status", type="str", required=False, description="Status"),
+ PortField(name="assignee", type="str", required=False, description="Assignee"),
+ PortField(name="dueDate", type="str", required=False, description="Fälligkeit"),
+ PortField(name="listId", type="str", required=False, description="ClickUp-Liste"),
+ ]),
+ "QueryResult": PortSchema(name="QueryResult", fields=[
+ PortField(name="rows", type="List[Any]", description="Ergebniszeilen"),
+ PortField(name="columns", type="List[str]", required=False, description="Spaltennamen"),
+ PortField(name="count", type="int", required=False, description="Zeilenanzahl"),
+ ]),
+ "UdmPage": PortSchema(name="UdmPage", fields=[
+ PortField(name="pageNumber", type="int", required=False, description="Seitennummer"),
+ PortField(name="blocks", type="List[Any]", required=False, description="ContentBlocks"),
+ ]),
+ "UdmBlock": PortSchema(name="UdmBlock", fields=[
+ PortField(name="kind", type="str", required=False, description="Block-Typ"),
+ PortField(name="text", type="str", required=False, description="Textinhalt"),
+ PortField(name="children", type="List[Any]", required=False, description="Unterblöcke"),
+ ]),
"DocumentList": PortSchema(name="DocumentList", fields=[
PortField(name="documents", type="List[Document]",
description="Dokumentenliste"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Verbindung, mit der die Liste erzeugt wurde"),
+ PortField(name="source", type="SharePointFolderRef", required=False,
+ description="Herkunftsordner / Quelle"),
+ PortField(name="count", type="int", required=False,
+ description="Anzahl Dokumente"),
]),
"FileList": PortSchema(name="FileList", fields=[
- PortField(name="files", type="List[File]",
+ PortField(name="files", type="List[FileItem]",
description="Dateiliste"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Verbindung"),
+ PortField(name="source", type="SharePointFolderRef", required=False,
+ description="Listen-Kontext"),
+ PortField(name="count", type="int", required=False,
+ description="Anzahl Dateien"),
]),
"EmailDraft": PortSchema(name="EmailDraft", fields=[
PortField(name="subject", type="str",
@@ -76,14 +182,26 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
description="CC"),
PortField(name="attachments", type="List[Document]", required=False,
description="Anhänge"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Outlook-/Graph-Verbindung"),
]),
"EmailList": PortSchema(name="EmailList", fields=[
- PortField(name="emails", type="List[Email]",
+ PortField(name="emails", type="List[EmailItem]",
description="E-Mails"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Verbindung"),
+ PortField(name="count", type="int", required=False,
+ description="Anzahl"),
]),
"TaskList": PortSchema(name="TaskList", fields=[
- PortField(name="tasks", type="List[Task]",
+ PortField(name="tasks", type="List[TaskItem]",
description="Aufgaben"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Verbindung"),
+ PortField(name="listId", type="str", required=False,
+ description="ClickUp-Listen-ID"),
+ PortField(name="count", type="int", required=False,
+ description="Anzahl"),
]),
"TaskResult": PortSchema(name="TaskResult", fields=[
PortField(name="success", type="bool",
@@ -143,11 +261,29 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
PortField(name="merged", type="Dict",
description="Zusammengeführte Daten"),
]),
+ "ActionDocument": PortSchema(name="ActionDocument", fields=[
+ PortField(name="documentName", type="str",
+ description="Dokumentname"),
+ PortField(name="documentData", type="Any",
+ description="Inhalt / Rohdaten (z.B. JSON-String, Bytes)"),
+ PortField(name="mimeType", type="str",
+ description="MIME-Typ"),
+ PortField(name="fileId", type="str", required=False,
+ description="Persistierte FileItem.id (vom Engine ergänzt)"),
+ PortField(name="fileName", type="str", required=False,
+ description="Persistierter Dateiname (vom Engine ergänzt)"),
+ ]),
"ActionResult": PortSchema(name="ActionResult", fields=[
PortField(name="success", type="bool",
description="Erfolg"),
PortField(name="error", type="str", required=False,
description="Fehler"),
+ # `documents` is populated for every action that returns ActionResult
+ # (see datamodelChat.ActionResult.documents and actionNodeExecutor.out).
+ # Without it in the catalog the DataPicker cannot offer downstream
+ # bindings like `processDocuments → documents → *` for syncToAccounting.
+ PortField(name="documents", type="List[ActionDocument]", required=False,
+ description="Erzeugte Dokumente (immer befüllt für Trustee/AI/Email/...)"),
PortField(name="data", type="Dict", required=False,
description="Ergebnisdaten"),
]),
@@ -156,7 +292,11 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
PortField(name="id", type="str", description="Dokument-ID"),
PortField(name="sourceType", type="str", description="Quellformat (pdf, docx, …)"),
PortField(name="sourcePath", type="str", description="Quellpfad"),
- PortField(name="children", type="List[Any]", description="StructuralNodes"),
+ PortField(name="children", type="List[Any]", description="StructuralNodes / Seiten"),
+ PortField(name="connection", type="ConnectionRef", required=False,
+ description="Optionale Verbindungsreferenz"),
+ PortField(name="source", type="SharePointFileRef", required=False,
+ description="Optionale Datei-Herkunft"),
]),
"UdmNodeList": PortSchema(name="UdmNodeList", fields=[
PortField(name="nodes", type="List[Any]", description="UDM StructuralNodes oder ContentBlocks"),
@@ -167,9 +307,287 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
PortField(name="mode", type="str", description="Konsolidierungsmodus"),
PortField(name="count", type="int", description="Anzahl verarbeiteter Elemente"),
]),
+
+ # -----------------------------------------------------------------
+ # Shared sub-types (used inside Result schemas)
+ # -----------------------------------------------------------------
+ "ProcessError": PortSchema(name="ProcessError", fields=[
+ PortField(name="documentId", type="str", required=False,
+ description="Betroffenes Dokument (falls zuordbar)"),
+ PortField(name="stage", type="str",
+ description="Pipeline-Stufe: extract | parse | sync | validate | …"),
+ PortField(name="message", type="str", description="Fehlermeldung"),
+ PortField(name="code", type="str", required=False, description="Fehler-Code"),
+ ]),
+ "JournalLine": PortSchema(name="JournalLine", fields=[
+ PortField(name="id", type="str", required=False, description="Buchungszeilen-ID"),
+ PortField(name="bookingDate", type="str", description="Buchungsdatum (ISO)"),
+ PortField(name="account", type="str", description="Konto"),
+ PortField(name="contraAccount", type="str", required=False, description="Gegenkonto"),
+ PortField(name="amount", type="float", description="Betrag"),
+ PortField(name="currency", type="str", required=False, description="Währung"),
+ PortField(name="text", type="str", required=False, description="Buchungstext"),
+ PortField(name="reference", type="str", required=False, description="Beleg-Referenz"),
+ ]),
+
+ # -----------------------------------------------------------------
+ # Trustee Action Results
+ # -----------------------------------------------------------------
+ "TrusteeRefreshResult": PortSchema(name="TrusteeRefreshResult", fields=[
+ PortField(name="syncCounts", type="Dict[str,int]",
+ description="Tabellen → Anzahl synchronisierter Datensätze"),
+ PortField(name="oldestBookingDate", type="str", required=False,
+ description="Ältestes Buchungsdatum (ISO)"),
+ PortField(name="newestBookingDate", type="str", required=False,
+ description="Neuestes Buchungsdatum (ISO)"),
+ PortField(name="durationMs", type="int", required=False,
+ description="Dauer in Millisekunden"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Trustee-Instanz"),
+ PortField(name="errors", type="List[ProcessError]", required=False,
+ description="Fehler-Liste"),
+ ]),
+ "TrusteeProcessResult": PortSchema(name="TrusteeProcessResult", fields=[
+ PortField(name="documents", type="List[Document]",
+ description="Verarbeitete Dokumente mit angereicherten Daten"),
+ PortField(name="processedCount", type="int", required=False,
+ description="Anzahl erfolgreich verarbeiteter Dokumente"),
+ PortField(name="failedCount", type="int", required=False,
+ description="Anzahl fehlgeschlagener Dokumente"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Trustee-Instanz"),
+ PortField(name="errors", type="List[ProcessError]", required=False,
+ description="Fehler-Liste"),
+ ]),
+ "TrusteeSyncResult": PortSchema(name="TrusteeSyncResult", fields=[
+ PortField(name="syncedCount", type="int",
+ description="Erfolgreich in das Buchhaltungssystem übertragene Datensätze"),
+ PortField(name="failedCount", type="int", required=False,
+ description="Fehlgeschlagene Übertragungen"),
+ PortField(name="journalLines", type="List[JournalLine]", required=False,
+ description="Erzeugte Buchungszeilen"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Ziel-Trustee-Instanz"),
+ PortField(name="errors", type="List[ProcessError]", required=False,
+ description="Fehler-Liste"),
+ ]),
+
+ # -----------------------------------------------------------------
+ # Redmine Action Results
+ # -----------------------------------------------------------------
+ "RedmineTicket": PortSchema(name="RedmineTicket", fields=[
+ PortField(name="id", type="str", description="Ticket-ID"),
+ PortField(name="subject", type="str", description="Betreff"),
+ PortField(name="description", type="str", required=False, description="Beschreibung"),
+ PortField(name="status", type="str", description="Status-Name"),
+ PortField(name="tracker", type="str", required=False,
+ description="Tracker (Bug, Feature, Task, …)"),
+ PortField(name="priority", type="str", required=False, description="Priorität"),
+ PortField(name="assignee", type="str", required=False, description="Zugewiesen an"),
+ PortField(name="author", type="str", required=False, description="Autor"),
+ PortField(name="project", type="str", required=False, description="Projekt"),
+ PortField(name="createdOn", type="str", required=False, description="Erstellt (ISO)"),
+ PortField(name="updatedOn", type="str", required=False, description="Aktualisiert (ISO)"),
+ PortField(name="dueDate", type="str", required=False, description="Fälligkeitsdatum"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Redmine-Instanz"),
+ ]),
+ "RedmineTicketList": PortSchema(name="RedmineTicketList", fields=[
+ PortField(name="tickets", type="List[RedmineTicket]", description="Ticket-Liste"),
+ PortField(name="count", type="int", required=False, description="Anzahl Tickets"),
+ PortField(name="filters", type="Dict[str,Any]", required=False,
+ description="Angewendete Filter"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Redmine-Instanz"),
+ ]),
+ "RedmineStats": PortSchema(name="RedmineStats", fields=[
+ PortField(name="kpis", type="Dict[str,Any]",
+ description="Key Performance Indicators"),
+ PortField(name="throughput", type="Dict[str,Any]", required=False,
+ description="Durchsatz pro Zeitraum"),
+ PortField(name="statusDistribution", type="Dict[str,int]", required=False,
+ description="Tickets pro Status"),
+ PortField(name="backlog", type="Dict[str,Any]", required=False,
+ description="Backlog-Statistik"),
+ PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
+ description="Redmine-Instanz"),
+ ]),
+
+ # -----------------------------------------------------------------
+ # ClickUp / SharePoint / Email helper results
+ # -----------------------------------------------------------------
+ "TaskAttachmentRef": PortSchema(name="TaskAttachmentRef", fields=[
+ PortField(name="taskId", type="str", description="Aufgaben-ID"),
+ PortField(name="attachmentId", type="str", required=False, description="Attachment-ID"),
+ PortField(name="fileName", type="str", required=False, description="Dateiname"),
+ PortField(name="url", type="str", required=False, description="Download-URL"),
+ ]),
+ "AttachmentSpec": PortSchema(name="AttachmentSpec", fields=[
+ PortField(name="source", type="str",
+ description="Quellart: path | document | url",
+ enumValues=["path", "document", "url"]),
+ PortField(name="ref", type="str",
+ description="Referenzwert (Pfad / Document.id / URL)"),
+ PortField(name="fileName", type="str", required=False,
+ description="Override-Dateiname"),
+ PortField(name="mimeType", type="str", required=False, description="MIME-Override"),
+ ]),
+
+ # -----------------------------------------------------------------
+ # Expressions (replace string-typed condition / cron params)
+ # -----------------------------------------------------------------
+ "CronExpression": PortSchema(name="CronExpression", fields=[
+ PortField(name="expression", type="str",
+ description="Cron-Ausdruck (5 oder 6 Felder)"),
+ PortField(name="timezone", type="str", required=False,
+ description="IANA Timezone (z.B. Europe/Zurich)"),
+ ]),
+ "ConditionExpression": PortSchema(name="ConditionExpression", fields=[
+ PortField(name="expression", type="str", description="Boolescher Ausdruck"),
+ PortField(name="syntax", type="str", required=False,
+ description="jmespath | jsonlogic | python | template",
+ enumValues=["jmespath", "jsonlogic", "python", "template"]),
+ ]),
+
+ # -----------------------------------------------------------------
+ # Semantic primitives (give meaning to scalar str values)
+ # -----------------------------------------------------------------
+ "DateTime": PortSchema(name="DateTime", fields=[
+ PortField(name="iso", type="str", description="ISO-8601 Datum/Zeit"),
+ PortField(name="timezone", type="str", required=False,
+ description="IANA Timezone"),
+ ]),
+ "Url": PortSchema(name="Url", fields=[
+ PortField(name="url", type="str", description="Vollständige URL"),
+ PortField(name="label", type="str", required=False, description="Anzeigename"),
+ ]),
}
+
+
+# ---------------------------------------------------------------------------
+# Catalog validator
+# ---------------------------------------------------------------------------
+
+# Primitives accepted as PortField.type in addition to catalog schema names.
+PRIMITIVE_TYPES: frozenset = frozenset({
+ "str", "int", "bool", "float", "Any", "Dict", "List",
+})
+
+
+def _stripContainer(typeStr: str) -> List[str]:
+ """
+ Extract referenced type names from a PortField.type string.
+
+ Examples:
+ "str" -> ["str"]
+ "List[Document]" -> ["Document"]
+ "Dict[str,Any]" -> ["str", "Any"]
+ "ConnectionRef" -> ["ConnectionRef"]
+ "List[ProcessError]" -> ["ProcessError"]
+ """
+ s = (typeStr or "").strip()
+ if not s:
+ return []
+ if "[" in s and s.endswith("]"):
+ # outer container ignored, inner parts split by comma
+ inner = s[s.index("[") + 1 : -1]
+ parts = [p.strip() for p in inner.split(",") if p.strip()]
+ return parts or [s]
+ return [s]
+
+
+def _isKnownType(typeName: str) -> bool:
+ return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG
+
+
+def _validateCatalog() -> List[str]:
+ """
+ Validate PORT_TYPE_CATALOG integrity.
+
+ Returns a list of error messages. Empty list means catalog is healthy.
+
+ Checks:
+ 1. Every PortField.type references either a primitive or a known schema.
+ 2. Discriminator fields exist, are typed "str", and at most one per schema.
+ 3. No cyclic references via required schema-typed fields
+ (optional fields may form cycles intentionally, e.g. provenance).
+ 4. Schema name in catalog key matches PortSchema.name.
+ """
+ errors: List[str] = []
+
+ # Check 4: key consistency
+ for key, schema in PORT_TYPE_CATALOG.items():
+ if schema.name != key:
+ errors.append(f"Catalog key '{key}' does not match schema.name '{schema.name}'")
+
+ # Check 1 + 2: type refs and discriminators
+ for schemaName, schema in PORT_TYPE_CATALOG.items():
+ discriminatorCount = 0
+ for field in schema.fields:
+ for refName in _stripContainer(field.type):
+ if not _isKnownType(refName):
+ errors.append(
+ f"{schemaName}.{field.name}: unknown type '{refName}' "
+ f"(not a primitive and not in catalog)"
+ )
+ if field.discriminator:
+ discriminatorCount += 1
+ if field.type != "str":
+ errors.append(
+ f"{schemaName}.{field.name}: discriminator must be 'str', got '{field.type}'"
+ )
+ if discriminatorCount > 1:
+ errors.append(
+ f"{schemaName}: has {discriminatorCount} discriminator fields, max 1 allowed"
+ )
+
+ # Check 3: cycles via required schema-typed fields
+ def _requiredSchemaRefs(name: str) -> List[str]:
+ sch = PORT_TYPE_CATALOG.get(name)
+ if not sch:
+ return []
+ out: List[str] = []
+ for field in sch.fields:
+ if not field.required:
+ continue
+ for ref in _stripContainer(field.type):
+ if ref in PORT_TYPE_CATALOG:
+ out.append(ref)
+ return out
+
+    def _hasCycle(start: str) -> Optional[List[str]]:
+        path: List[str] = []
+        visiting: set = set()
+
+ def _dfs(name: str) -> Optional[List[str]]:
+ if name in visiting:
+ return path + [name]
+ visiting.add(name)
+ path.append(name)
+ for ref in _requiredSchemaRefs(name):
+ if ref == start and len(path) > 0:
+ return path + [ref]
+ cycle = _dfs(ref)
+ if cycle:
+ return cycle
+ path.pop()
+ visiting.discard(name)
+ return None
+
+ return _dfs(start)
+
+ for schemaName in PORT_TYPE_CATALOG.keys():
+ cycle = _hasCycle(schemaName)
+ if cycle and cycle[0] == schemaName:
+ errors.append(
+ f"{schemaName}: cyclic required-ref chain: {' -> '.join(cycle)}"
+ )
+ break # one cycle is enough — avoid spamming
+
+ return errors
+
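+# Hedged usage sketch: this patch does not show where _validateCatalog() is
+# invoked; a plausible integration is a module-level self-check that logs
+# instead of raising, so one bad entry cannot block startup.
+#
+#     _catalogErrors = _validateCatalog()
+#     if _catalogErrors:
+#         logger.error("PORT_TYPE_CATALOG drift: %s", "; ".join(_catalogErrors))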
+
# ---------------------------------------------------------------------------
# SYSTEM_VARIABLES
# ---------------------------------------------------------------------------
@@ -259,6 +677,8 @@ def _defaultForType(typeStr: str) -> Any:
return 0
if typeStr == "str":
return ""
+ if typeStr in PORT_TYPE_CATALOG:
+ return {}
return None
@@ -272,210 +692,6 @@ def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]:
return result
-# ---------------------------------------------------------------------------
-# Input extractors (one per input port type)
-# ---------------------------------------------------------------------------
-
-def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract EmailDraft fields from upstream output."""
- result = {}
- if upstream.get("responseData") and isinstance(upstream["responseData"], dict):
- rd = upstream["responseData"]
- for key in ("subject", "body", "to", "cc"):
- if key in rd:
- result[key] = rd[key]
- if not result:
- for key in ("subject", "body", "to", "cc"):
- if key in upstream:
- result[key] = upstream[key]
- return result
-
-
-def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract documents from upstream output."""
- docs = upstream.get("documents") or upstream.get("documentList") or []
- if not docs and isinstance(upstream.get("data"), dict):
- docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or []
- # input.upload format
- if not docs:
- files = upstream.get("files") or []
- fileObj = upstream.get("file")
- fileIds = upstream.get("fileIds") or []
- if fileObj:
- docs = [fileObj]
- elif files:
- docs = files
- elif fileIds:
- docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds]
- normalized = docs if isinstance(docs, list) else [docs]
- return {"documents": normalized, "documentList": normalized} if docs else {}
-
-
-def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract text from upstream output."""
- text = upstream.get("text") or upstream.get("response") or upstream.get("context") or ""
- if not text and upstream.get("payload"):
- import json
- payload = upstream["payload"]
- text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload)
- return {"text": str(text)} if text else {}
-
-
-def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract email list from upstream output."""
- emails = upstream.get("emails") or []
- if not emails:
- docs = upstream.get("documents") or upstream.get("documentList") or []
- if docs:
- import json
- for doc in docs:
- raw = doc.get("documentData") if isinstance(doc, dict) else None
- if raw:
- try:
- data = json.loads(raw) if isinstance(raw, str) else raw
- if isinstance(data, dict):
- found = (data.get("emails", {}).get("emails", [])
- or data.get("searchResults", {}).get("results", []))
- if found:
- emails = found
- break
- except (json.JSONDecodeError, TypeError):
- pass
- return {"emails": emails} if emails else {}
-
-
-def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract task list from upstream output."""
- tasks = upstream.get("tasks") or []
- if not tasks:
- docs = upstream.get("documents") or upstream.get("documentList") or []
- if docs:
- import json
- for doc in docs:
- raw = doc.get("documentData") if isinstance(doc, dict) else None
- if raw:
- try:
- data = json.loads(raw) if isinstance(raw, str) else raw
- if isinstance(data, dict) and "tasks" in data:
- tasks = data["tasks"]
- break
- except (json.JSONDecodeError, TypeError):
- pass
- return {"tasks": tasks} if tasks else {}
-
-
-def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract file list from upstream output."""
- files = upstream.get("files") or []
- return {"files": files} if files else {}
-
-
-def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract form payload from upstream output."""
- payload = upstream.get("payload")
- if payload and isinstance(payload, dict):
- return {"payload": payload}
- return {}
-
-
-def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract AI result fields from upstream output."""
- result = {}
- for key in ("prompt", "response", "responseData", "context", "documents"):
- if key in upstream:
- result[key] = upstream[key]
- return result
-
-
-def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract bool result from upstream output."""
- result = upstream.get("result")
- if isinstance(result, bool):
- return {"result": result, "reason": upstream.get("reason", "")}
- approved = upstream.get("approved")
- if isinstance(approved, bool):
- return {"result": approved, "reason": upstream.get("reason", "")}
- return {}
-
-
-def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract task result from upstream output."""
- result = {}
- if "taskId" in upstream:
- result["taskId"] = upstream["taskId"]
- if "task" in upstream:
- result["task"] = upstream["task"]
- elif "clickupTask" in upstream:
- result["task"] = upstream["clickupTask"]
- if "success" in upstream:
- result["success"] = upstream["success"]
- return result
-
-
-def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract aggregate result from upstream output."""
- items = upstream.get("items") or []
- return {"items": items, "count": len(items)}
-
-
-def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract merge result from upstream output."""
- return {
- "inputs": upstream.get("inputs", {}),
- "first": upstream.get("first"),
- "merged": upstream.get("merged", {}),
- }
-
-
-def _extractUdmDocument(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract UdmDocument fields from upstream output."""
- if upstream.get("children") is not None and upstream.get("sourceType"):
- return upstream
- udm = upstream.get("udm")
- if isinstance(udm, dict) and udm.get("children") is not None:
- return udm
- return {}
-
-
-def _extractUdmNodeList(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract UdmNodeList fields from upstream output."""
- nodes = upstream.get("nodes")
- if isinstance(nodes, list):
- return {"nodes": nodes, "count": len(nodes)}
- children = upstream.get("children")
- if isinstance(children, list):
- return {"nodes": children, "count": len(children)}
- return {}
-
-
-def _extractConsolidateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
- """Extract ConsolidateResult fields from upstream output."""
- result = {}
- for key in ("result", "mode", "count"):
- if key in upstream:
- result[key] = upstream[key]
- return result
-
-
-INPUT_EXTRACTORS: Dict[str, Callable] = {
- "EmailDraft": _extractEmailDraft,
- "DocumentList": _extractDocuments,
- "TextResult": _extractText,
- "EmailList": _extractEmailList,
- "TaskList": _extractTaskList,
- "FileList": _extractFileList,
- "FormPayload": _extractFormPayload,
- "AiResult": _extractAiResult,
- "BoolResult": _extractBoolResult,
- "TaskResult": _extractTaskResult,
- "AggregateResult": _extractAggregateResult,
- "MergeResult": _extractMergeResult,
- "UdmDocument": _extractUdmDocument,
- "UdmNodeList": _extractUdmNodeList,
- "ConsolidateResult": _extractConsolidateResult,
-}
-
-
# ---------------------------------------------------------------------------
# Transit helpers
# ---------------------------------------------------------------------------
@@ -522,27 +738,83 @@ def _resolveTransitChain(
# Schema derivation for dynamic outputs
# ---------------------------------------------------------------------------
-def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
- """Derive output schema from form field definitions."""
- fields_param = (node.get("parameters") or {}).get("fields")
+def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]:
+ """Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …)."""
+ fields_param = (node.get("parameters") or {}).get(param_key)
if not fields_param or not isinstance(fields_param, list):
return None
- portFields = []
+ portFields: List[PortField] = []
+
+    def _append_field(fname: str, ftype: Any, lab: Any, required: bool) -> None:
+        _desc = resolveText(lab) if lab is not None else fname
+        if _desc is None or not str(_desc).strip():
+            _desc = fname
+ portFields.append(PortField(
+ name=fname,
+ type=str(ftype) if ftype is not None else "str",
+ description=_desc,
+ required=required,
+ ))
+
for f in fields_param:
- if isinstance(f, dict) and f.get("name"):
- _lab = f.get("label")
- _desc = resolveText(_lab) if _lab is not None else f["name"]
- if not _desc.strip():
- _desc = f["name"]
- portFields.append(PortField(
- name=f["name"],
- type=f.get("type", "str"),
- description=_desc,
- required=f.get("required", False),
- ))
+ if not isinstance(f, dict) or not f.get("name"):
+ continue
+ fname = str(f["name"])
+ if str(f.get("type", "")).lower() == "group" and isinstance(f.get("fields"), list):
+ for sub in f["fields"]:
+ if isinstance(sub, dict) and sub.get("name"):
+ _append_field(
+ f"{fname}.{sub['name']}",
+ sub.get("type", "str"),
+ sub.get("label"),
+ bool(sub.get("required", False)),
+ )
+ continue
+ _append_field(fname, f.get("type", "str"), f.get("label"), bool(f.get("required", False)))
return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None
+
+
+def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
+    """Derive output schema from form field definitions (``parameters.fields``)."""
+    return _derive_form_payload_schema_from_param(node, "fields")
+
+
+def parse_graph_defined_output_schema(
+ node: Dict[str, Any],
+ output_port: Dict[str, Any],
+) -> Optional[PortSchema]:
+ """
+ Resolve a node's output port to a concrete PortSchema.
+
+ Supports:
+ - Static catalog name: ``schema: "ActionResult"``
+ - Graph-defined: ``schema: {"kind": "fromGraph", "parameter": "fields"}``
+ - Legacy: ``dynamic`` + ``deriveFrom`` on the port dict.
+ """
+ if not isinstance(output_port, dict):
+ return None
+ schema_spec = output_port.get("schema")
+ if isinstance(schema_spec, dict) and schema_spec.get("kind") == "fromGraph":
+ param_key = str(schema_spec.get("parameter") or "fields")
+ return _derive_form_payload_schema_from_param(node, param_key)
+ if output_port.get("dynamic") and output_port.get("deriveFrom"):
+ return _derive_form_payload_schema_from_param(node, str(output_port.get("deriveFrom")))
+ if isinstance(schema_spec, str) and schema_spec:
+ return PORT_TYPE_CATALOG.get(schema_spec)
+ return None
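+
+# Illustration (node and port dicts are assumed shapes): a graph-defined spec
+# derives the schema from the node's own parameters, while a plain string
+# resolves against PORT_TYPE_CATALOG.
+#
+#     node = {"parameters": {"fields": [{"name": "email", "type": "str"}]}}
+#     port = {"schema": {"kind": "fromGraph", "parameter": "fields"}}
+#     parse_graph_defined_output_schema(node, port)  # PortSchema, field "email"
+#     parse_graph_defined_output_schema(node, {"schema": "ActionResult"})
+#     # -> PORT_TYPE_CATALOG["ActionResult"]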
+
+
+def resolve_output_schema_name(node: Dict[str, Any], output_port: Dict[str, Any]) -> str:
+ """Return a schema name for port compatibility / path listing."""
+ derived = parse_graph_defined_output_schema(node, output_port)
+ if derived:
+ return derived.name
+ spec = output_port.get("schema") if isinstance(output_port, dict) else None
+ if isinstance(spec, str) and spec:
+ return spec
+ return "Any"
+
+
def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
"""Derive output schema from transform mappings."""
mappings = (node.get("parameters") or {}).get("mappings")
diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
index 11d9d3e9..4332df50 100644
--- a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
+++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
@@ -26,6 +26,7 @@ from modules.workflows.automation2.runEnvelope import (
normalize_run_envelope,
)
from modules.features.graphicalEditor.entryPoints import find_invocation
+from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
from modules.shared.i18nRegistry import apiRouteContext, resolveText
routeApiMsg = apiRouteContext("routeFeatureGraphicalEditor")
@@ -135,6 +136,48 @@ def get_node_types(
return result
+@router.post("/{instanceId}/upstream-paths")
+@limiter.limit("60/minute")
+def post_upstream_paths(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ body: Dict[str, Any] = Body(...),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Return pickable upstream DataRef paths for a node (draft graph in body)."""
+ _validateInstanceAccess(instanceId, context)
+ graph = body.get("graph")
+ node_id = body.get("nodeId")
+ if not isinstance(graph, dict) or not node_id:
+ raise HTTPException(status_code=400, detail=routeApiMsg("graph and nodeId are required"))
+ paths = compute_upstream_paths(graph, str(node_id))
+ return {"paths": paths}
+
+
+@router.get("/{instanceId}/upstream-paths/{node_id}")
+@limiter.limit("60/minute")
+def get_upstream_paths_saved(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ node_id: str = Path(..., description="Target node id"),
+ workflowId: str = Query(..., description="Workflow id whose saved graph is used"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Return upstream paths using the persisted workflow graph (same payload as POST variant)."""
+ mandate_id = _validateInstanceAccess(instanceId, context)
+ if not workflowId:
+ raise HTTPException(status_code=400, detail=routeApiMsg("workflowId is required"))
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+
+ iface = getGraphicalEditorInterface(context.user, mandate_id, featureInstanceId=instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+ graph = wf.get("graph") or {}
+ paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id))
+ return {"paths": paths}
+
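+# Request sketch (IDs illustrative; router prefix omitted):
+#     POST .../{instanceId}/upstream-paths
+#          body: {"graph": {...draft graph...}, "nodeId": "n2"}
+#     GET  .../{instanceId}/upstream-paths/n2?workflowId=wf-123
+# Both return {"paths": [{"producerNodeId": ..., "path": [...], "type": ...}]}.
+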
+
@router.get("/{instanceId}/options/user.connection")
@limiter.limit("60/minute")
def get_user_connection_options(
@@ -813,6 +856,7 @@ async def _runEditorAgent(
"\n\nAvailable tools (all valid — use whichever the user's intent calls for):"
"\n Graph-mutating: readWorkflowGraph, listAvailableNodeTypes, "
"describeNodeType, addNode, removeNode, connectNodes, setNodeParameter, "
+ "listUpstreamPaths, bindNodeParameter, "
"autoLayoutWorkflow, validateGraph."
"\n Workflow lifecycle: createWorkflow (new empty workflow), "
"updateWorkflowMetadata (rename / change description / tags / activate), "
@@ -844,6 +888,8 @@ async def _runEditorAgent(
"description, sane defaults, or — for required user-connection fields — "
"an actual connectionId). Do NOT pass position; the layout step handles it."
"\n6. connectNodes — wire the nodes consistent with port schemas from describeNodeType."
+ "\n6b. When a parameter must take data from an upstream node, call listUpstreamPaths(nodeId=target) "
+ "then bindNodeParameter(producerNodeId, path, parameterName) — do not rely on implicit wire fill."
"\n7. autoLayoutWorkflow — call exactly once as the LAST graph-mutating step so the "
"canvas shows a readable top-down layout instead of overlapping boxes."
"\n8. validateGraph — sanity check, then answer the user."
diff --git a/modules/features/graphicalEditor/upstreamPathsService.py b/modules/features/graphicalEditor/upstreamPathsService.py
new file mode 100644
index 00000000..8075fd00
--- /dev/null
+++ b/modules/features/graphicalEditor/upstreamPathsService.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2025 Patrick Motsch
+"""Compute pickable upstream paths for DataPicker / AI workflow tools."""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Set
+
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, PortSchema, parse_graph_defined_output_schema
+from modules.workflows.automation2.graphUtils import buildConnectionMap
+
+_NODE_BY_TYPE = {n["id"]: n for n in STATIC_NODE_TYPES}
+
+
+def _paths_for_port_schema(schema: PortSchema, producer_node_id: str) -> List[Dict[str, Any]]:
+ out: List[Dict[str, Any]] = []
+ for field in schema.fields:
+ path = [field.name]
+ out.append(
+ {
+ "producerNodeId": producer_node_id,
+ "path": path,
+ "type": field.type,
+ "label": ".".join(str(p) for p in path),
+ "scopeOrigin": "data",
+ }
+ )
+ out.append(
+ {
+ "producerNodeId": producer_node_id,
+ "path": [],
+ "type": schema.name,
+ "label": "(whole output)",
+ "scopeOrigin": "data",
+ }
+ )
+ return out
+
+
+def _paths_for_schema(schema_name: str, producer_node_id: str) -> List[Dict[str, Any]]:
+ if not schema_name or schema_name == "Transit":
+ return []
+ schema = PORT_TYPE_CATALOG.get(schema_name)
+ if not schema:
+ return []
+ return _paths_for_port_schema(schema, producer_node_id)
+
+
+def compute_upstream_paths(graph: Dict[str, Any], target_node_id: str) -> List[Dict[str, Any]]:
+ """
+ Return flattened first-level paths for every ancestor node's primary output schema.
+ """
+ nodes = graph.get("nodes") or []
+ connections = graph.get("connections") or []
+ node_by_id = {n["id"]: n for n in nodes if n.get("id")}
+ if target_node_id not in node_by_id:
+ return []
+
+ conn_map = buildConnectionMap(connections)
+ # predecessors: walk backwards along edges (target -> source)
+ preds: Dict[str, Set[str]] = {}
+ for tgt, pairs in conn_map.items():
+ for src, _, _ in pairs:
+ preds.setdefault(tgt, set()).add(src)
+
+    ancestors: Set[str] = set()
+    stack = [target_node_id]
+    while stack:
+        cur = stack.pop()
+        for p in preds.get(cur, ()):
+            if p not in ancestors:
+                ancestors.add(p)
+                stack.append(p)
+
+ paths: List[Dict[str, Any]] = []
+ for aid in sorted(ancestors):
+ anode = node_by_id.get(aid)
+ if not anode:
+ continue
+ nt = anode.get("type", "")
+ ndef = _NODE_BY_TYPE.get(nt)
+ if not ndef:
+ continue
+ out0 = (ndef.get("outputPorts") or {}).get(0, {})
+ derived = parse_graph_defined_output_schema(anode, out0 if isinstance(out0, dict) else {})
+ if derived:
+ for entry in _paths_for_port_schema(derived, aid):
+ entry["producerLabel"] = (anode.get("title") or "").strip() or aid
+ paths.append(entry)
+ else:
+ raw_schema = out0.get("schema") if isinstance(out0, dict) else None
+ schema_name = raw_schema if isinstance(raw_schema, str) and raw_schema else "ActionResult"
+ for entry in _paths_for_schema(schema_name, aid):
+ entry["producerLabel"] = (anode.get("title") or "").strip() or aid
+ paths.append(entry)
+
+ # Lexical loop hints (flow.loop): any loop node in ancestors adds synthetic paths
+ for aid in ancestors:
+ anode = node_by_id.get(aid) or {}
+ if anode.get("type") == "flow.loop":
+ paths.extend(
+ [
+ {
+ "producerNodeId": aid,
+ "path": ["currentItem"],
+ "type": "Any",
+ "label": "loop.currentItem",
+ "scopeOrigin": "loop",
+ },
+ {
+ "producerNodeId": aid,
+ "path": ["currentIndex"],
+ "type": "int",
+ "label": "loop.currentIndex",
+ "scopeOrigin": "loop",
+ },
+ {
+ "producerNodeId": aid,
+ "path": ["count"],
+ "type": "int",
+ "label": "loop.count",
+ "scopeOrigin": "loop",
+ },
+ ]
+ )
+
+ return paths
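+
+
+# Usage sketch. Node-type ids and the connection-dict shape are illustrative;
+# the real edge format is whatever graphUtils.buildConnectionMap consumes.
+#
+#     graph = {
+#         "nodes": [
+#             {"id": "n1", "type": "sharepoint.readDocuments", "title": "Read"},
+#             {"id": "n2", "type": "trustee.processDocuments"},
+#         ],
+#         "connections": [{"source": "n1", "target": "n2"}],
+#     }
+#     compute_upstream_paths(graph, "n2")
+#     # -> [{"producerNodeId": "n1", "path": ["documents"],
+#     #      "type": "List[Document]", "scopeOrigin": "data", ...}, ...]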
diff --git a/modules/features/teamsbot/datamodelTeamsbot.py b/modules/features/teamsbot/datamodelTeamsbot.py
index f19b4c6c..76c9fb83 100644
--- a/modules/features/teamsbot/datamodelTeamsbot.py
+++ b/modules/features/teamsbot/datamodelTeamsbot.py
@@ -4,7 +4,8 @@
Teamsbot Feature - Data Models.
Pydantic models for Teams Bot sessions, transcripts, bot responses, and configuration.
"""
-from typing import Optional, List, Dict, Any
+from typing import Optional, List, Dict, Any, Literal
+from datetime import datetime, timezone
from pydantic import BaseModel, Field
from enum import Enum
import uuid
@@ -12,6 +13,14 @@ import uuid
from modules.datamodels.datamodelBase import PowerOnModel
+# ============================================================================
+# Director Prompt Limits
+# ============================================================================
+
+DIRECTOR_PROMPT_TEXT_LIMIT = 8000
+DIRECTOR_PROMPT_FILE_LIMIT = 10
+
+
# ============================================================================
# Enums
# ============================================================================
@@ -267,6 +276,56 @@ class SpeechTeamsResponse(BaseModel):
reasoning: str = Field(default="", description="Reasoning for the decision (for logging/debug)")
detectedIntent: str = Field(default="none", description="Detected intent: addressed, question, proactive, stop, none")
commands: Optional[List[TeamsbotCommand]] = Field(default=None, description="Optional list of commands to execute (e.g. toggle transcript, send chat, change language)")
+ needsAgent: bool = Field(default=False, description="If True, escalate to agentService.runAgent for complex multi-step processing (web research, mail, etc.)")
+ agentReason: Optional[str] = Field(default=None, description="Why escalation to the full agent is required (used as task brief for the agent)")
+
+
+# ============================================================================
+# Director Prompts (private operator instructions sent during a live meeting)
+# ============================================================================
+
+class TeamsbotDirectorPromptStatus(str, Enum):
+ """Lifecycle status of a Director Prompt."""
+ QUEUED = "queued"
+ RUNNING = "running"
+ SUCCEEDED = "succeeded"
+ FAILED = "failed"
+ CONSUMED = "consumed" # one-shot consumed; persistent prompts stay active
+
+
+class TeamsbotDirectorPromptMode(str, Enum):
+ """How long a Director Prompt remains effective."""
+ ONE_SHOT = "oneShot"
+ PERSISTENT = "persistent"
+
+
+class TeamsbotDirectorPrompt(PowerOnModel):
+ """A private operator instruction injected into the bot during a live meeting.
+
+ Stored in PostgreSQL so it survives reconnects (persistent prompts) and is
+ auditable. Visible only to the session owner via SSE; invisible to other
+ meeting participants.
+ """
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Director prompt ID")
+ sessionId: str = Field(description="Teams Bot session ID (FK)")
+ instanceId: str = Field(description="Feature instance ID (FK)")
+ operatorUserId: str = Field(description="User ID of the operator who issued the prompt")
+ text: str = Field(description="The director instruction text", max_length=DIRECTOR_PROMPT_TEXT_LIMIT)
+ mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent")
+ fileIds: List[str] = Field(default_factory=list, description="UDB-selected file/object IDs to attach as RAG context")
+ status: TeamsbotDirectorPromptStatus = Field(default=TeamsbotDirectorPromptStatus.QUEUED, description="Lifecycle status")
+ statusMessage: Optional[str] = Field(default=None, description="Optional error or status detail")
+ createdAt: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat(), description="ISO timestamp when created")
+ consumedAt: Optional[str] = Field(default=None, description="ISO timestamp when consumed (one-shot) or marked done")
+ agentRunId: Optional[str] = Field(default=None, description="Reference to the agent run that processed this prompt")
+ responseText: Optional[str] = Field(default=None, description="Final agent text delivered to the meeting")
+
+
+class TeamsbotDirectorPromptCreateRequest(BaseModel):
+ """Request body for submitting a new Director Prompt."""
+ text: str = Field(description="Director instruction text", min_length=1, max_length=DIRECTOR_PROMPT_TEXT_LIMIT)
+ mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent")
+    fileIds: List[str] = Field(default_factory=list, max_length=DIRECTOR_PROMPT_FILE_LIMIT,
+                               description="UDB file IDs to attach (max 10)")
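+
+
+# Example request body (illustrative values):
+#     {"text": "Collect open questions and draft a private summary",
+#      "mode": "persistent",
+#      "fileIds": ["<udb-file-id>"]}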
# ============================================================================
diff --git a/modules/features/teamsbot/interfaceFeatureTeamsbot.py b/modules/features/teamsbot/interfaceFeatureTeamsbot.py
index 5395d922..2408e4cb 100644
--- a/modules/features/teamsbot/interfaceFeatureTeamsbot.py
+++ b/modules/features/teamsbot/interfaceFeatureTeamsbot.py
@@ -21,6 +21,9 @@ from .datamodelTeamsbot import (
TeamsbotSystemBot,
TeamsbotUserSettings,
TeamsbotUserAccount,
+ TeamsbotDirectorPrompt,
+ TeamsbotDirectorPromptStatus,
+ TeamsbotDirectorPromptMode,
)
logger = logging.getLogger(__name__)
@@ -114,11 +117,10 @@ class TeamsbotObjects:
return self.db.recordModify(TeamsbotSession, sessionId, updates)
def deleteSession(self, sessionId: str) -> bool:
- """Delete a session and all related transcripts and responses."""
- # Delete related records first
+ """Delete a session and all related transcripts, responses and director prompts."""
self._deleteTranscriptsBySession(sessionId)
self._deleteResponsesBySession(sessionId)
- # Delete session
+ self._deletePromptsBySession(sessionId)
return self.db.recordDelete(TeamsbotSession, sessionId)
# =========================================================================
@@ -272,6 +274,62 @@ class TeamsbotObjects:
"""Delete saved MS credentials."""
return self.db.recordDelete(TeamsbotUserAccount, accountId)
+ # =========================================================================
+ # Director Prompts (private operator instructions during a live meeting)
+ # =========================================================================
+
+ def createDirectorPrompt(self, promptData: Dict[str, Any]) -> Dict[str, Any]:
+ """Create a new director prompt record."""
+ return self.db.recordCreate(TeamsbotDirectorPrompt, promptData)
+
+ def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]:
+ """Get a single director prompt by ID."""
+ records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"id": promptId})
+ return records[0] if records else None
+
+ def getDirectorPrompts(self, sessionId: str, operatorUserId: str | None = None) -> List[Dict[str, Any]]:
+ """Get all director prompts for a session, optionally filtered by operator."""
+ recordFilter: Dict[str, Any] = {"sessionId": sessionId}
+ if operatorUserId:
+ recordFilter["operatorUserId"] = operatorUserId
+ records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter=recordFilter)
+ records.sort(key=lambda r: r.get("createdAt") or "")
+ return records
+
+ def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]:
+ """Get persistent prompts that are still active (not consumed/failed) for a session."""
+ records = self.db.getRecordset(
+ TeamsbotDirectorPrompt,
+ recordFilter={
+ "sessionId": sessionId,
+ "mode": TeamsbotDirectorPromptMode.PERSISTENT.value,
+ },
+ )
+ terminal = {
+ TeamsbotDirectorPromptStatus.CONSUMED.value,
+ TeamsbotDirectorPromptStatus.FAILED.value,
+ }
+ active = [r for r in records if r.get("status") not in terminal]
+ active.sort(key=lambda r: r.get("createdAt") or "")
+ return active
+
+ def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+ """Update a director prompt (status, response text, etc.)."""
+ return self.db.recordModify(TeamsbotDirectorPrompt, promptId, updates)
+
+ def deleteDirectorPrompt(self, promptId: str) -> bool:
+ """Delete a director prompt (e.g. when operator removes a persistent prompt)."""
+ return self.db.recordDelete(TeamsbotDirectorPrompt, promptId)
+
+ def _deletePromptsBySession(self, sessionId: str) -> int:
+ """Delete all director prompts for a session (called from deleteSession)."""
+ records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"sessionId": sessionId})
+ count = 0
+ for record in records:
+ self.db.recordDelete(TeamsbotDirectorPrompt, record.get("id"))
+ count += 1
+ return count
+
# =========================================================================
# Stats / Aggregation
# =========================================================================
diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py
index e5ed9425..37cb2d77 100644
--- a/modules/features/teamsbot/routeFeatureTeamsbot.py
+++ b/modules/features/teamsbot/routeFeatureTeamsbot.py
@@ -36,6 +36,11 @@ from .datamodelTeamsbot import (
TeamsbotUserAccount,
TeamsbotResponseChannel,
TeamsbotResponseMode,
+ TeamsbotDirectorPromptCreateRequest,
+ TeamsbotDirectorPromptMode,
+ TeamsbotDirectorPromptStatus,
+ DIRECTOR_PROMPT_FILE_LIMIT,
+ DIRECTOR_PROMPT_TEXT_LIMIT,
)
# Import service
@@ -382,7 +387,12 @@ async def streamSession(
# Send initial session state
yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n"
-
+
+ # Send current bot WebSocket connection state so the operator UI can
+ # render the live indicator without waiting for the next connect/disconnect.
+ from .service import getActiveService as _getActiveService
+ yield f"data: {json.dumps({'type': 'botConnectionState', 'data': {'connected': _getActiveService(sessionId) is not None}})}\n\n"
+
# Stream events
eventQueue = _sessionEvents.get(sessionId)
if not eventQueue:
@@ -832,6 +842,132 @@ async def submitMfaCode(
raise HTTPException(status_code=404, detail=routeApiMsg("No active MFA challenge for this session"))
+# =========================================================================
+# Director Prompts (private operator instructions during a live meeting)
+# =========================================================================
+
+@router.post("/{instanceId}/sessions/{sessionId}/directorPrompts")
+@limiter.limit("30/minute")
+async def submitDirectorPrompt(
+ request: Request,
+ instanceId: str,
+ sessionId: str,
+ body: TeamsbotDirectorPromptCreateRequest,
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Submit a private director prompt to the running bot. Triggers the
+ full agent path (web, mail, RAG, etc.) and delivers the answer into the
+ meeting via TTS + chat. Only the session owner can submit prompts."""
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ session = interface.getSession(sessionId)
+ if not session:
+ raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
+ _validateSessionOwnership(session, context)
+
+ if session.get("status") not in (
+ TeamsbotSessionStatus.ACTIVE.value,
+ TeamsbotSessionStatus.JOINING.value,
+ ):
+ raise HTTPException(status_code=400, detail=routeApiMsg("Session is not active"))
+
+ text = (body.text or "").strip()
+ if not text:
+ raise HTTPException(status_code=400, detail=routeApiMsg("Prompt text is required"))
+ if len(text) > DIRECTOR_PROMPT_TEXT_LIMIT:
+ raise HTTPException(
+ status_code=400,
+ detail=routeApiMsg(f"Prompt text exceeds limit of {DIRECTOR_PROMPT_TEXT_LIMIT} characters"),
+ )
+ fileIds = list(body.fileIds or [])
+ if len(fileIds) > DIRECTOR_PROMPT_FILE_LIMIT:
+ raise HTTPException(
+ status_code=400,
+ detail=routeApiMsg(f"Too many files ({len(fileIds)}); max {DIRECTOR_PROMPT_FILE_LIMIT}"),
+ )
+
+ from .service import getActiveService
+ service = getActiveService(sessionId)
+ if not service:
+ raise HTTPException(
+ status_code=409,
+ detail=routeApiMsg(
+ "Bot is not yet live in the meeting (no WebSocket connection). "
+ "Wait until the bot status indicator turns green and try again."
+ ),
+ )
+
+ created = await service.submitDirectorPrompt(
+ sessionId=sessionId,
+ operatorUserId=str(context.user.id),
+ text=text,
+ mode=body.mode,
+ fileIds=fileIds,
+ )
+ return {"prompt": created}
+
+
+@router.get("/{instanceId}/sessions/{sessionId}/directorPrompts")
+@limiter.limit("30/minute")
+async def listDirectorPrompts(
+ request: Request,
+ instanceId: str,
+ sessionId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ """List director prompts for a session (only operator's own prompts)."""
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ session = interface.getSession(sessionId)
+ if not session:
+ raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
+ _validateSessionOwnership(session, context)
+
+ operatorUserId = None if context.isPlatformAdmin else str(context.user.id)
+ prompts = interface.getDirectorPrompts(sessionId, operatorUserId=operatorUserId)
+ return {"prompts": prompts}
+
+
+@router.delete("/{instanceId}/sessions/{sessionId}/directorPrompts/{promptId}")
+@limiter.limit("30/minute")
+async def deleteDirectorPrompt(
+ request: Request,
+ instanceId: str,
+ sessionId: str,
+ promptId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Remove a (typically persistent) director prompt. Marks it consumed so
+ it no longer influences the bot. The DB record is kept for audit."""
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ session = interface.getSession(sessionId)
+ if not session:
+ raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
+ _validateSessionOwnership(session, context)
+
+ prompt = interface.getDirectorPrompt(promptId)
+ if not prompt or prompt.get("sessionId") != sessionId:
+ raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found")
+ if not context.isPlatformAdmin and prompt.get("operatorUserId") != str(context.user.id):
+ raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found")
+
+ from .service import getActiveService
+ service = getActiveService(sessionId)
+ if service:
+ await service.removePersistentPrompt(promptId)
+ else:
+ # Bot not connected: mark consumed directly
+ interface.updateDirectorPrompt(promptId, {
+ "status": TeamsbotDirectorPromptStatus.CONSUMED.value,
+ "statusMessage": "Removed by operator (bot offline)",
+ })
+ return {"deleted": True, "promptId": promptId}
+
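+# Client sketch (illustrative, not part of this patch): driving the three
+# director-prompt endpoints above. Base URL, token and IDs are hypothetical
+# placeholders; the paths match the route decorators, assuming the router is
+# mounted under /api/teamsbot.
+#
+#     import httpx
+#     base = "https://gw.example/api/teamsbot/<instanceId>/sessions/<sessionId>"
+#     hdrs = {"Authorization": "Bearer <token>"}
+#     body = {"text": "Summarize the attached offer", "mode": "persistent",
+#             "fileIds": ["udb-file-1"]}
+#     promptId = httpx.post(f"{base}/directorPrompts", json=body, headers=hdrs).json()["prompt"]["id"]
+#     httpx.get(f"{base}/directorPrompts", headers=hdrs)                # list own prompts
+#     httpx.delete(f"{base}/directorPrompts/{promptId}", headers=hdrs)  # mark consumed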
+
# =========================================================================
# Voice Test Endpoint
# =========================================================================
@@ -845,7 +981,7 @@ async def testVoice(
):
"""Test TTS voice with AI-generated sample text in the correct language."""
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService
+ from .service import _createAiService
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
mandateId = _validateInstanceAccess(instanceId, context)
@@ -856,12 +992,7 @@ async def testVoice(
botName = body.get("botName", "AI Assistant")
try:
- # Generate test text dynamically via AI in the correct language
- serviceContext = type('Ctx', (), {
- 'user': context.user, 'mandateId': mandateId,
- 'featureInstanceId': instanceId, 'featureCode': 'teamsbot'
- })()
- aiService = AiService(serviceCenter=serviceContext)
+ aiService = _createAiService(context.user, mandateId, instanceId)
await aiService.ensureAiObjectsInitialized()
aiRequest = AiCallRequest(
diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py
index 9e59f653..2067a7f2 100644
--- a/modules/features/teamsbot/service.py
+++ b/modules/features/teamsbot/service.py
@@ -7,10 +7,11 @@ Manages the audio processing pipeline: STT -> Context Buffer -> SPEECH_TEAMS ->
import logging
import json
+import re
import asyncio
import time
import base64
-from typing import Optional, Dict, Any, List
+from typing import Optional, Dict, Any, List, Callable
from fastapi import WebSocket
@@ -28,13 +29,504 @@ from .datamodelTeamsbot import (
TeamsbotConfig,
TeamsbotResponseMode,
TeamsbotResponseChannel,
+ TeamsbotDetectedIntent,
SpeechTeamsResponse,
TeamsbotCommand,
+ TeamsbotDirectorPrompt,
+ TeamsbotDirectorPromptStatus,
+ TeamsbotDirectorPromptMode,
+ DIRECTOR_PROMPT_TEXT_LIMIT,
+ DIRECTOR_PROMPT_FILE_LIMIT,
)
from .browserBotConnector import BrowserBotConnector
logger = logging.getLogger(__name__)
+# Agent run limits for director prompts / speech escalation (meeting context).
+# Higher than default workspace agent: Teams research + tool chains need depth.
+TEAMSBOT_AGENT_MAX_ROUNDS = 8
+TEAMSBOT_AGENT_MAX_COST_CHF = 0.12
+
+# How many recent director-prompt briefings (one-shot + persistent) we keep in
+# session memory so SPEECH_TEAMS triggers and speech escalation can still see
+# the operator's attached files + analysis after the prompt itself was consumed.
+_RECENT_DIRECTOR_BRIEFINGS_MAX = 6
+
+# Quick-ack ("Moment...") UX: fire a SHORT TTS the moment the bot's name is
+# detected so the speaker hears within ~1s that the bot reacted, instead of
+# waiting for the full debounce + SPEECH_TEAMS + agent pipeline (~5-30s).
+# Throttled per session to avoid acking every fragment of a long utterance.
+_QUICK_ACK_MIN_INTERVAL_SEC = 25.0
+
+# Number of phrase variants we generate per kind (rotated round-robin so back-
+# to-back acks/notices don't sound identical).
+_EPHEMERAL_PHRASE_VARIANTS = 4
+
+# Localisation INTENTS for ephemeral phrases. Each kind describes WHAT the
+# phrase should express; the actual wording is produced at runtime by the AI
+# in the bot's configured language + persona. The intent text below is the
+# instruction passed to the LLM (English, since it's a model directive — the
+# OUTPUT will be in the configured spoken language). Add new ephemeral phrase
+# kinds here, never inline string literals at the call site.
+_EPHEMERAL_PHRASE_INTENTS: Dict[str, str] = {
+ "quickAck": (
+ "Very short verbal acknowledgment (1 to 4 words) the assistant says "
+ "the moment its name is recognised, BEFORE it has formulated a full "
+ "answer. The intent is purely 'I heard you, I'm thinking' — natural, "
+ "conversational, never a complete sentence."
+ ),
+ "agentBusy": (
+ "One short sentence (max ~12 words) the assistant says BEFORE starting "
+ "a longer research / tool-call task, so the audience knows the answer "
+ "will take a few seconds. Polite, professional, calm."
+ ),
+ "agentRound": (
+ "One short sentence (max ~14 words) the assistant says BETWEEN rounds "
+ "of a longer agent task to signal that work is still in progress. "
+ "Include the placeholder tokens '{round}' and '{maxRounds}' so the "
+ "caller can substitute the actual numbers — e.g. 'Step {round} of "
+ "{maxRounds}, still working.'"
+ ),
+}
+
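+# Contract sketch (illustrative): what a generated "agentRound" variant could
+# look like and how its placeholders are rendered at the call site. The
+# wording is hypothetical -- real variants come from the AI at runtime.
+#
+#     variant = "Schritt {round} von {maxRounds}, ich bin noch dran."
+#     variant.format(round=3, maxRounds=TEAMSBOT_AGENT_MAX_ROUNDS)
+#     # -> "Schritt 3 von 8, ich bin noch dran."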
+
+def _voiceLineLooksLikeBillingOrMeta(line: str) -> bool:
+ """Heuristic: trailing lines that are separators or billing/usage footers."""
+ s = line.strip()
+ if not s:
+ return True
+ lower = s.lower()
+ if re.match(r"^[-=*_]{3,}\s*$", s):
+ return True
+ if re.match(r"^#{1,6}\s*(usage|billing|costs?|meta|technical|statistics)\b", lower):
+ return True
+ if "chf" in lower and re.search(r"\d", s):
+ if re.search(
+ r"\b(total|usage|cost|billing|token|spent|used|price|estimate|"
+ r"rounds?|calls?|duration|processing\s*time|model\s*calls?)\b",
+ lower,
+ ):
+ return True
+ if "token" in lower and re.search(r"\d", s):
+ if re.search(r"\b(total|usage|prompt|completion)\b", lower):
+ return True
+ pl = lower.replace(" ", "")
+ if "progressafter" in pl and ("aicalls:" in pl or "toolcalls:" in pl):
+ return True
+ return False
+
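+# Behaviour sketch (illustrative inputs, not part of the change):
+#
+#     _voiceLineLooksLikeBillingOrMeta("----------")                   # True (separator)
+#     _voiceLineLooksLikeBillingOrMeta("Total usage: 0.04 CHF")        # True (billing footer)
+#     _voiceLineLooksLikeBillingOrMeta("Der Vertrag kostet 500 CHF.")  # False ("kostet" != \bcost\b)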
+
+_EMOJI_PATTERN = re.compile(
+ "["
+ "\U0001F300-\U0001FAFF" # symbols & pictographs, emoticons, transport, supplemental
+ "\U00002600-\U000027BF" # misc symbols + dingbats (incl. ⚙ 🔐 🔌 ✓ ✗)
+ "\U0001F1E6-\U0001F1FF" # regional indicator (flags)
+ "\U00002B00-\U00002BFF" # arrows, geometric
+ "\U0001F900-\U0001F9FF" # supplemental symbols (incl. 🤖 🧠)
+ "\U0000FE0F" # variation selector-16 (emoji presentation)
+ "]+",
+ flags=re.UNICODE,
+)
+
+
+def _voiceFriendlyMeetingText(raw: str) -> str:
+ """Sanitise a chat/markdown response so it can be SPOKEN naturally.
+
+ Aggressive cleanup — when a TTS engine reads raw markdown out loud the
+ listener hears "hash hash hash Zusammenfassung pipe pipe pipe", which
+ is unbearable in a meeting. The chat / DB / UI keep the original text;
+ only the audio path goes through this sanitiser.
+
+ What we strip:
+ * Code fences and inline code
+ * Markdown emphasis (**bold**, *italic*, __bold__, _italic_)
+ * Markdown links → keep label
+ * Headings (# .. ######)
+ * Markdown tables (any line with two or more pipes is dropped wholesale)
+ * Horizontal rules (---, ***, ___ on their own line)
+ * Bullet markers (-, *, •, ·) and numbered list markers (1., 2)) at line start
+ * Emojis (full Unicode pictograph ranges + variation selector)
+ * Decorative trailing colons on bullet headings
+ * Stray pipes left over from inline tables
+ * Trailing billing / "maximum rounds reached" / "budget exceeded" footers
+
+ Whitespace is then collapsed to single spaces.
+ """
+ if not raw:
+ return ""
+
+ # Trim trailing operator/billing footers BEFORE any structural rewrite
+ # so we don't waste effort sanitising a footer that gets dropped.
+ low = raw.lower()
+ if "maximum rounds reached" in low:
+ m = re.search(r"(?is)maximum\s+rounds\s+reached", raw)
+ if m:
+ head = raw[: m.start()].strip()
+ raw = head or (
+ "Die Abklaerung brauchte mehr Schritte als vorgesehen; Details stehen im Chat."
+ )
+ if "budget exceeded" in low:
+ m = re.search(r"(?is)budget\s+exceeded", raw)
+ if m:
+ head = raw[: m.start()].strip()
+ raw = head or "Das eingestellte Kostenlimit ist erreicht; Details stehen im Chat."
+
+ lines = raw.strip().split("\n")
+ while lines and _voiceLineLooksLikeBillingOrMeta(lines[-1]):
+ lines.pop()
+ t = "\n".join(lines).strip()
+ if not t:
+ t = raw.strip()
+
+ # 1) Strip code blocks (multi-line first, then inline)
+ t = re.sub(r"```[\s\S]*?```", " ", t)
+ t = re.sub(r"`([^`]+)`", r"\1", t)
+
+ # 2) Drop markdown table rows (any line with two or more pipes) and the
+ # separator lines they come with (|---|---|). A paragraph that just
+ # happens to contain ONE pipe survives.
+ cleanedLines: List[str] = []
+ for ln in t.split("\n"):
+ stripped = ln.strip()
+ if stripped.count("|") >= 2:
+ continue
+ if re.fullmatch(r"\s*\|?[\s\-:|]+\|?\s*", stripped) and "-" in stripped:
+ continue
+ cleanedLines.append(ln)
+ t = "\n".join(cleanedLines)
+
+ # 3) Drop horizontal rule lines (---, ***, ___, with optional spaces)
+ t = re.sub(r"(?m)^\s*([-*_])\s*\1\s*\1[\s\1]*$", "", t)
+
+ # 4) Headings: drop the leading hashes
+ t = re.sub(r"(?m)^\s*#{1,6}\s+", "", t)
+
+ # 5) Bullet markers at line start — keep the content, drop the bullet
+ t = re.sub(r"(?m)^\s*[-*•·]\s+", "", t)
+ # 6) Numbered list markers at line start ("1.", "2)", "3 -")
+ t = re.sub(r"(?m)^\s*\d+[\.\)]\s+", "", t)
+
+ # 7) Emphasis markers (after bullets so a "**Bold:**" heading is handled)
+ t = re.sub(r"\*\*([^*]+)\*\*", r"\1", t)
+ t = re.sub(r"\*([^*\n]+)\*", r"\1", t)
+ t = re.sub(r"__([^_]+)__", r"\1", t)
+ t = re.sub(r"(?` `{` `}` `[` `]` `(` `)`
+ # `_` `&` `@` `$` `%` `` -- replaced with a space so word
+ # boundaries are preserved.
+ t = re.sub(r"[*#~^=+|\\<>{}\[\]()_&@$%`]+", " ", t)
+
+ # 10e) Drop ASCII double-quote (single quotes are legitimate apostrophes
+ # in contractions like "don't" / "geht's", so we keep U+0027).
+ t = t.replace('"', "")
+
+ # 10f) Slash between letters/digits — TTS reads "slash". Replace with
+ # " or " for readability when it separates words like "und/oder".
+ t = re.sub(r"(?<=\w)\s*/\s*(?=\w)", " oder ", t)
+ # Any remaining stray slash is just whitespace.
+ t = t.replace("/", " ")
+
+ # 10g) Collapse runs of the same punctuation mark ("!!!" → "!", ".." → ".")
+ t = re.sub(r"([\.,;:!\?])\1{1,}", r"\1", t)
+ # Remove orphan punctuation directly preceded by whitespace
+ # (common after symbol stripping: " , ", " . ").
+ t = re.sub(r"\s+([\.,;:!\?])", r"\1", t)
+ # Collapse trailing colon at end of meaningful phrase to a period for
+ # nicer cadence ("Was ist PowerOn:" → "Was ist PowerOn.").
+ t = re.sub(r":\s*$", ".", t.rstrip())
+ # 10h) Inside MULTI-LINE blocks, tighten a " :" line tail ("Titel :" → "Titel:").
+ t = re.sub(r"\s+:\s*$", ":", t, flags=re.MULTILINE)
+
+ # 11) Collapse whitespace to single spaces; protect sentence breaks by
+ # turning paragraph blanks into a period if the previous chunk
+ # didn't already terminate.
+ paragraphs = [p.strip() for p in re.split(r"\n\s*\n", t) if p.strip()]
+ rebuilt: List[str] = []
+ for p in paragraphs:
+ p = re.sub(r"\s+", " ", p).strip()
+ if not p:
+ continue
+ if not re.search(r"[\.!\?\u2026:]\s*$", p):
+ p = p.rstrip() + "."
+ rebuilt.append(p)
+ t = " ".join(rebuilt)
+ t = re.sub(r"\s+", " ", t).strip()
+
+ # If we sanitised away everything (e.g. the input was *only* a markdown
+ # table or a wall of pictographs) return empty — the caller (TTS / voice
+ # summary) treats empty as "nothing to say", which is the safe default.
+ # Falling back to raw markdown here would leak the very symbols we just
+ # spent ten passes removing.
+ return t
+
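+# Before/after sketch (illustrative): a structured chat answer reduced to a
+# speakable line. Input text is hypothetical.
+#
+#     raw = "## Fazit\n\n- **Option A** ist guenstiger\n\n| Preis | CHF |\n|---|---|"
+#     _voiceFriendlyMeetingText(raw)
+#     # -> "Fazit. Option A ist guenstiger."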
+
+# Google Cloud TTS rejects single sentences that exceed ~5000 bytes. The Chirp3
+# voices are stricter: long, comma-heavy sentences (no terminating punctuation)
+# also fail with "Sentence ... is too long". We chunk well below the documented
+# limit AND inject sentence terminators so the synthesizer accepts every chunk.
+_TTS_MAX_CHUNK_CHARS = 800
+
+
+def _splitTextForTts(text: str, maxChars: int = _TTS_MAX_CHUNK_CHARS) -> List[str]:
+ """Split a long voice line into TTS-safe chunks at sentence/paragraph boundaries.
+
+ The result preserves order and contains no empty strings. A single
+ sentence longer than ``maxChars`` is hard-cut at word boundaries.
+ """
+ cleaned = (text or "").strip()
+ if not cleaned:
+ return []
+ if len(cleaned) <= maxChars:
+ return [cleaned]
+
+ sentencePattern = re.compile(r"(?<=[\.!\?\u2026])\s+|\n+")
+ rawSentences = [s.strip() for s in sentencePattern.split(cleaned) if s and s.strip()]
+ if not rawSentences:
+ rawSentences = [cleaned]
+
+ chunks: List[str] = []
+ buffer = ""
+ for sentence in rawSentences:
+ if len(sentence) > maxChars:
+ if buffer:
+ chunks.append(buffer.strip())
+ buffer = ""
+ words = sentence.split(" ")
+ current = ""
+ for word in words:
+ candidate = (current + " " + word).strip() if current else word
+ if len(candidate) > maxChars and current:
+ chunks.append(current.strip())
+ current = word
+ else:
+ current = candidate
+ if current:
+ if not re.search(r"[\.!\?\u2026]\s*$", current):
+ current = current.rstrip() + "."
+ chunks.append(current.strip())
+ continue
+
+ candidate = (buffer + " " + sentence).strip() if buffer else sentence
+ if len(candidate) > maxChars and buffer:
+ chunks.append(buffer.strip())
+ buffer = sentence
+ else:
+ buffer = candidate
+
+ if buffer:
+ chunks.append(buffer.strip())
+
+ finalized: List[str] = []
+ for c in chunks:
+ if not c:
+ continue
+ if not re.search(r"[\.!\?\u2026]\s*$", c):
+ c = c.rstrip() + "."
+ finalized.append(c)
+ return finalized
+
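+# Shape sketch (illustrative): sentences are regrouped so no chunk exceeds the
+# limit and every chunk ends with a terminator the synthesizer accepts.
+#
+#     chunks = _splitTextForTts("Erstens ist alles gut. " * 60)  # ~1380 chars
+#     # -> 2 chunks (781 + 597 chars), each ending in "."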
+
+async def _speakTextChunked(
+ websocket: Optional[WebSocket],
+ voiceInterface: Any,
+ sessionId: str,
+ voiceText: str,
+ languageCode: str,
+ voiceName: Optional[str],
+ isCancelled: Optional[Callable[[], bool]] = None,
+) -> Dict[str, Any]:
+ """Run TTS in chunks and dispatch each ``playAudio`` over the websocket.
+
+ Returns ``{"success": bool, "chunks": int, "played": int, "error": Optional[str], "cancelled": bool}``.
+ Failure for one chunk does NOT abort the rest; partial playback still
+ counts as ``success=True`` so the caller can decide whether to add a chat
+ fallback for the missing parts.
+
+ ``isCancelled`` is an optional zero-arg predicate the caller passes in to
+ signal "abort the remaining chunks". It is checked BEFORE each Google
+ TTS round-trip and again BEFORE each websocket send, so a stop word in
+ the meeting can interrupt a multi-chunk dispatch within at most one
+ chunk boundary instead of waiting for the whole answer to finish.
+ """
+ chunks = _splitTextForTts(voiceText)
+ result: Dict[str, Any] = {"success": False, "chunks": len(chunks), "played": 0, "error": None, "cancelled": False}
+ if not chunks:
+ result["error"] = "no text"
+ return result
+ if voiceInterface is None:
+ result["error"] = "no voice interface"
+ return result
+
+ lastError: Optional[str] = None
+ for idx, chunk in enumerate(chunks, start=1):
+ if isCancelled is not None and isCancelled():
+ result["cancelled"] = True
+ logger.info(
+ f"Session {sessionId}: TTS chunk loop cancelled before chunk "
+ f"{idx}/{len(chunks)} (user stop or newer answer in flight)"
+ )
+ break
+ try:
+ ttsResult = await voiceInterface.textToSpeech(
+ text=chunk,
+ languageCode=languageCode,
+ voiceName=voiceName,
+ )
+ except Exception as ttsErr: # pragma: no cover - network/runtime errors
+ lastError = f"chunk {idx}/{len(chunks)} raised: {ttsErr}"
+ logger.warning(f"Session {sessionId}: TTS {lastError}")
+ continue
+
+ if not isinstance(ttsResult, dict) or ttsResult.get("success") is False:
+ err = (ttsResult or {}).get("error", "unknown") if isinstance(ttsResult, dict) else "no result"
+ lastError = f"chunk {idx}/{len(chunks)} failed: {err}"
+ logger.warning(f"Session {sessionId}: TTS {lastError}")
+ continue
+
+ audioContent = ttsResult.get("audioContent")
+ if not audioContent:
+ lastError = f"chunk {idx}/{len(chunks)} returned no audioContent"
+ logger.warning(f"Session {sessionId}: TTS {lastError}")
+ continue
+
+ if websocket is None:
+ lastError = "websocket unavailable"
+ break
+
+ if isCancelled is not None and isCancelled():
+ result["cancelled"] = True
+ logger.info(
+ f"Session {sessionId}: TTS chunk loop cancelled before "
+ f"sending chunk {idx}/{len(chunks)} (audio dropped)"
+ )
+ break
+
+ try:
+ await websocket.send_text(json.dumps({
+ "type": "playAudio",
+ "sessionId": sessionId,
+ "audio": {
+ "data": base64.b64encode(
+ audioContent if isinstance(audioContent, bytes) else audioContent.encode()
+ ).decode(),
+ "format": "mp3",
+ },
+ }))
+ result["played"] += 1
+ except Exception as wsErr: # pragma: no cover - websocket failures
+ lastError = f"chunk {idx}/{len(chunks)} websocket send failed: {wsErr}"
+ logger.warning(f"Session {sessionId}: TTS {lastError}")
+ break
+
+ result["success"] = result["played"] > 0
+ if lastError:
+ result["error"] = lastError
+ return result
+
+
+def _coercePersistedDetectedIntent(raw: Optional[str]) -> tuple:
+ """Map a free-form intent label (e.g. ``agent:directorPrompt``) onto a
+ TeamsbotDetectedIntent member for DB persistence. Returns ``(enum, meta)``
+ where ``meta`` is an optional label suffix preserved for the reasoning field."""
+ if not raw or not str(raw).strip():
+ return TeamsbotDetectedIntent.NONE, None
+ s = str(raw).strip().lower()
+ for member in TeamsbotDetectedIntent:
+ if member.value == s:
+ return member, None
+ if s.startswith("agent:"):
+ return TeamsbotDetectedIntent.PROACTIVE, str(raw).strip()[:120]
+ return TeamsbotDetectedIntent.NONE, str(raw).strip()[:120]
+
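+# Mapping sketch (illustrative):
+#
+#     _coercePersistedDetectedIntent("agent:directorPrompt")  # -> (PROACTIVE, "agent:directorPrompt")
+#     _coercePersistedDetectedIntent("gibberish")             # -> (NONE, "gibberish")
+#     _coercePersistedDetectedIntent(None)                    # -> (NONE, None)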
+
+# Director prompts are PRIVATE operator instructions — they must NOT be echoed
+# verbatim into the meeting. The agent is asked to start its FINAL answer with
+# either ``MEETING_REPLY:`` (followed by the text actually meant for the meeting)
+# or ``SILENT:`` / ``INTERNAL_ONLY:`` (followed by an internal note for the
+# operator UI). Anything else → treat as silent (safe default).
+_DIRECTOR_REPLY_PATTERN = re.compile(
+ r"^\s*(MEETING_REPLY|MEETING|REPLY|SAY|SPEAK)\s*:\s*",
+ re.IGNORECASE,
+)
+_DIRECTOR_SILENT_PATTERN = re.compile(
+ r"^\s*(SILENT|INTERNAL(?:_ONLY)?|NOTE|NO_MEETING_OUTPUT|ACK(?:NOWLEDGE)?)\s*:\s*",
+ re.IGNORECASE,
+)
+
+
+def _parseDirectorPromptFinal(finalText: str) -> Dict[str, Any]:
+ """Parse the agent's final answer for a director prompt.
+
+ Returns ``{"kind": "meeting"|"silent", "meetingText": str, "internalNote": str}``.
+
+ Default is ``silent`` so unmarked replies are NOT broadcast into the meeting.
+ """
+ text = (finalText or "").strip()
+ if not text:
+ return {"kind": "silent", "meetingText": "", "internalNote": ""}
+
+ meetingMatch = _DIRECTOR_REPLY_PATTERN.match(text)
+ if meetingMatch:
+ body = text[meetingMatch.end():].strip()
+ return {"kind": "meeting", "meetingText": body, "internalNote": ""}
+
+ silentMatch = _DIRECTOR_SILENT_PATTERN.match(text)
+ if silentMatch:
+ body = text[silentMatch.end():].strip()
+ return {"kind": "silent", "meetingText": "", "internalNote": body}
+
+ # No marker → safe default: do NOT spam the meeting with the agent's
+ # internal reasoning. Keep the full text as an internal note for the
+ # operator UI so nothing is lost.
+ return {"kind": "silent", "meetingText": "", "internalNote": text}
+
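+# Parsing sketch (illustrative; example texts are hypothetical):
+#
+#     _parseDirectorPromptFinal("MEETING_REPLY: Das Angebot liegt bei 12k.")
+#     # -> {"kind": "meeting", "meetingText": "Das Angebot liegt bei 12k.", "internalNote": ""}
+#     _parseDirectorPromptFinal("INTERNAL_ONLY: Quelle war unklar.")
+#     # -> {"kind": "silent", "meetingText": "", "internalNote": "Quelle war unklar."}
+#     _parseDirectorPromptFinal("The answer is 12k.")  # unmarked -> silent, kept as internal note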
+
+# =========================================================================
+# Active Service Registry (sessionId -> running TeamsbotService instance)
+#
+# Required so HTTP endpoints (e.g. director-prompt POST) can reach the
+# TeamsbotService instance currently holding the live websocket + voice
+# interface for that session, without going through the websocket loop.
+# =========================================================================
+_activeServices: Dict[str, "TeamsbotService"] = {}
+
+
+def getActiveService(sessionId: str) -> Optional["TeamsbotService"]:
+ """Return the running TeamsbotService for a session, or None if not active."""
+ return _activeServices.get(sessionId)
+
# =========================================================================
# AI Service Factory (for billing-aware AI calls)
@@ -65,6 +557,25 @@ async def _emitSessionEvent(sessionId: str, eventType: str, data: Any):
await _sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()})
+def _normalizeGatewayHostForBotWs(host: str) -> str:
+ """Use IPv4 loopback for local dev WebSocket URLs passed to the Node browser-bot.
+
+ Node on Windows often resolves ``localhost`` to ``::1`` first; Uvicorn bound to
+ ``0.0.0.0`` typically accepts IPv4 only, so the bot gets ``ECONNREFUSED ::1``.
+ """
+ h = host.strip()
+ lower = h.lower()
+ if lower == "localhost":
+ return "127.0.0.1"
+ if lower.startswith("localhost:"):
+ return "127.0.0.1" + h[len("localhost"):]
+ if lower.startswith("[::1]:"):
+ return "127.0.0.1" + h.partition("]")[2]
+ if lower in ("[::1]", "::1"):
+ return "127.0.0.1"
+ return h
+
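+# Normalisation sketch (illustrative):
+#
+#     _normalizeGatewayHostForBotWs("localhost:8000")    # -> "127.0.0.1:8000"
+#     _normalizeGatewayHostForBotWs("[::1]:8000")        # -> "127.0.0.1:8000"
+#     _normalizeGatewayHostForBotWs("demo.example.com")  # -> unchanged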
+
class TeamsbotService:
"""
Pipeline Orchestrator for Teams Bot sessions.
@@ -102,6 +613,75 @@ class TeamsbotService:
self._pendingNameTrigger: Optional[Dict[str, Any]] = None
self._followUpWindowEnd: float = 0.0
+ # Quick-ack throttle (timestamp of the last short "Moment..." ack we
+ # spoke into the meeting). Without this guard a long sentence with
+ # multiple name mentions would trigger several acks in a row.
+ self._lastQuickAckTs: float = 0.0
+
+ # Session-scoped phrase pool for SHORT ephemeral utterances (quick
+ # acks, "checking..." notices, per-round progress). Lazily populated
+ # by the AI in the bot's configured language + persona — no hardcoded
+ # strings or hardcoded language branching anywhere downstream. Keyed
+ # by the kinds defined in ``_EPHEMERAL_PHRASE_INTENTS``.
+ # * ``self._phrasePool[kind]`` -> list of variants for that kind
+ # * ``self._phrasePoolIdx[kind]`` -> round-robin pointer
+ # Concurrent generation calls for the same kind are serialised by the
+ # lock so we don't spawn duplicate AI requests on a burst.
+ self._phrasePool: Dict[str, List[str]] = {}
+ self._phrasePoolIdx: Dict[str, int] = {}
+ self._phrasePoolLock: asyncio.Lock = asyncio.Lock()
+
+ # Voice pipeline: a single per-session lock that serialises every TTS
+ # dispatch into the meeting. Without it three independent code paths
+ # (SPEECH_TEAMS direct answer, agent escalation final answer, and
+ # operator-driven director prompt) can all reach
+ # ``websocket.send_text({"type": "playAudio", ...})`` at the same time
+ # and the browser bot then plays interleaved chunks — i.e. "two bots
+ # talking over each other" exactly as the operator suspects. Chat
+ # (text) sends are NOT locked: they're cheap and can interleave fine.
+ self._meetingTtsLock: asyncio.Lock = asyncio.Lock()
+ # Generation counter incremented every time we begin producing a NEW
+ # meeting answer OR every time the user issues a hard stop. Any TTS
+ # chunk loop captures the counter value at start; before sending
+ # each chunk to the bot it re-checks the counter and bails out if
+ # it has moved on. This is what makes "Stopp" actually feel
+ # instantaneous: the in-flight TTS dispatch loop drops itself the
+ # moment the next chunk would have been sent, without waiting for
+ # any AI round-trip or extra Google TTS call to come back.
+ self._answerGenerationCounter: int = 0
+ # Tracking handles for cancellable background tasks. Keeping a
+ # reference lets ``_cancelInFlightSpeech`` actually call
+ # ``task.cancel()`` instead of just hoping the task notices the
+ # generation counter has moved on. Cleared in the task's own
+ # ``finally`` block.
+ self._currentEscalationTask: Optional[asyncio.Task] = None
+ self._currentQuickAckTask: Optional[asyncio.Task] = None
+ # Whether an agent escalation task is in flight. Kept separate from
+ # ``_aiAnalysisInProgress`` (which only covers the SPEECH_TEAMS phase)
+ # so a new speech trigger that arrives WHILE the agent is still
+ # researching does not start a parallel SPEECH_TEAMS that would then
+ # answer at the same time as the agent.
+ self._agentEscalationInFlight: bool = False
+
+ # Live transport handles for out-of-band actions (director prompts, agent escalation).
+ # Set in handleBotWebSocket once the bot connects; cleared on disconnect.
+ self._activeSessionId: Optional[str] = None
+ self._websocket: Optional[WebSocket] = None
+ self._voiceInterface = None
+
+ # Persistent director prompts kept in memory for context injection across triggers.
+ # Loaded from DB on (re)connect; mutated by submit/delete director prompt routes.
+ self._activePersistentPrompts: List[Dict[str, Any]] = []
+
+ # Recent director-prompt briefings (one-shot AND persistent) — keeps the
+ # operator's attached files and the agent's internal analysis available
+ # for later SPEECH_TEAMS triggers, even after a one-shot prompt has been
+ # consumed. Without this pool, the bot "forgets" attached docs as soon
+ # as the director prompt finished, and answers later meeting questions
+ # ("summarize the doc") with general babble instead of the file content.
+ # Capped by ``_RECENT_DIRECTOR_BRIEFINGS_MAX`` to bound prompt size.
+ self._recentDirectorBriefings: List[Dict[str, Any]] = []
+
# =========================================================================
# Session Lifecycle
# =========================================================================
@@ -145,6 +725,7 @@ class TeamsbotService:
# gatewayBaseUrl is passed from the route handler (derived from request.base_url)
wsScheme = "wss" if gatewayBaseUrl.startswith("https") else "ws"
gatewayHost = gatewayBaseUrl.replace("https://", "").replace("http://", "").rstrip("/")
+ gatewayHost = _normalizeGatewayHostForBotWs(gatewayHost)
fullGatewayWsUrl = f"{wsScheme}://{gatewayHost}/api/teamsbot/{self.instanceId}/bot/ws/{sessionId}"
hasAuth = bool(botAccountEmail and botAccountPassword)
@@ -262,6 +843,41 @@ class TeamsbotService:
except Exception:
self._botAccountEmail = None
+ # Register the live service so out-of-band callers (director prompts,
+ # agent escalation) can deliver text/audio through this same websocket.
+ self._activeSessionId = sessionId
+ self._websocket = websocket
+ self._voiceInterface = voiceInterface
+ _activeServices[sessionId] = self
+
+ # Notify the operator UI that the bot's WebSocket is now live so the
+ # director-prompt panel can enable its submit button.
+ try:
+ await _emitSessionEvent(sessionId, "botConnectionState", {
+ "connected": True,
+ "timestamp": getIsoTimestamp(),
+ })
+ except Exception:
+ pass
+
+ # Restore active persistent director prompts from DB (survives reconnects).
+ try:
+ self._activePersistentPrompts = interface.getActivePersistentPrompts(sessionId) or []
+ if self._activePersistentPrompts:
+ logger.info(
+ f"Session {sessionId}: Loaded {len(self._activePersistentPrompts)} active persistent director prompt(s)"
+ )
+ except Exception as restoreErr:
+ logger.warning(f"Session {sessionId}: Could not restore persistent director prompts: {restoreErr}")
+ self._activePersistentPrompts = []
+
+ # Pre-warm the ephemeral phrase pool in the background so the first
+ # quick-ack ("Moment...") and interim agent notice don't have to wait
+ # for the AI round-trip. Best-effort: if generation fails, the
+ # corresponding ephemeral cue is silently skipped at runtime — never
+ # falls back to hardcoded language strings.
+ asyncio.create_task(self._warmEphemeralPhrasePool(sessionId))
+
logger.info(f"[WS] Handler started for session {sessionId}")
try:
@@ -339,83 +955,64 @@ class TeamsbotService:
)
elif msgType == "voiceGreeting":
+ # Legacy path: older bot images send a pre-built greeting
+ # text. New bots use ``requestGreeting`` and let the
+ # Gateway own greeting generation.
greetingText = message.get("text", "")
greetingLang = message.get("language", self.config.language)
- logger.info(f"[WS] Voice greeting: text={greetingText[:60]}..., language={greetingLang}")
+ logger.info(
+ f"[WS] Voice greeting (legacy): text={greetingText[:60]}..., language={greetingLang}"
+ )
if greetingText and voiceInterface:
+ await self._dispatchGreetingToMeeting(
+ sessionId=sessionId,
+ greetingText=greetingText,
+ greetingLang=greetingLang,
+ sendToChat=False,
+ interface=interface,
+ voiceInterface=voiceInterface,
+ websocket=websocket,
+ )
+
+ elif msgType == "requestGreeting":
+ # New path: bot just signals "I have joined" — Gateway
+ # generates the greeting text via AI in the configured
+ # language + persona, then dispatches it to BOTH the
+ # meeting chat (sendChatMessage command) and TTS. No
+ # hardcoded language strings on the bot side.
+ requestedLang = (
+ message.get("language") or self.config.language or ""
+ ).strip() or "en-US"
+ botNameHint = (
+ message.get("botName") or self.config.botName or ""
+ ).strip() or self.config.botName
+ logger.info(
+ f"[WS] Greeting request from bot: language={requestedLang}, name={botNameHint}"
+ )
+ if voiceInterface:
try:
- await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
- "status": "requested",
- "hasWebSocket": True,
- "message": "Voice greeting TTS requested",
- "timestamp": getIsoTimestamp(),
- })
- ttsResult = await voiceInterface.textToSpeech(
- text=greetingText,
- languageCode=greetingLang,
- voiceName=self.config.voiceId
+ greetingText = await self._generateGreetingText(
+ requestedLang
)
- if ttsResult and isinstance(ttsResult, dict):
- audioContent = ttsResult.get("audioContent")
- if audioContent:
- await websocket.send_text(json.dumps({
- "type": "playAudio",
- "sessionId": sessionId,
- "audio": {
- "data": base64.b64encode(audioContent if isinstance(audioContent, bytes) else audioContent.encode()).decode(),
- "format": "mp3",
- }
- }))
- logger.info(f"Voice greeting TTS sent for session {sessionId}")
- await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
- "status": "dispatched",
- "hasWebSocket": True,
- "message": "Voice greeting TTS dispatched to bot",
- "timestamp": getIsoTimestamp(),
- })
-
- greetingTranscriptData = TeamsbotTranscript(
+ except Exception as genErr:
+ logger.warning(
+ f"Greeting generation failed for session {sessionId}: {genErr}"
+ )
+ greetingText = ""
+ if greetingText:
+ await self._dispatchGreetingToMeeting(
sessionId=sessionId,
- speaker=self.config.botName,
- text=greetingText,
- timestamp=getIsoTimestamp(),
- confidence=1.0,
- language=greetingLang,
- isFinal=True,
- source="botResponse",
- ).model_dump()
- greetingTranscript = interface.createTranscript(greetingTranscriptData)
-
- self._contextBuffer.append({
- "speaker": self.config.botName,
- "text": greetingText,
- "timestamp": getUtcTimestamp(),
- "source": "botResponse",
- })
- self._lastTranscriptSpeaker = self.config.botName
- self._lastTranscriptText = greetingText
- self._lastTranscriptId = greetingTranscript.get("id")
-
- await _emitSessionEvent(sessionId, "botResponse", {
- "id": greetingTranscript.get("id"),
- "responseText": greetingText,
- "responseType": TeamsbotResponseType.AUDIO.value,
- "detectedIntent": "greeting",
- "reasoning": "Automatic join greeting",
- "timestamp": getIsoTimestamp(),
- })
- await _emitSessionEvent(sessionId, "transcript", {
- "id": greetingTranscript.get("id"),
- "speaker": self.config.botName,
- "text": greetingText,
- "confidence": 1.0,
- "timestamp": getIsoTimestamp(),
- "isContinuation": False,
- "source": "botResponse",
- "speakerResolvedFromHint": False,
- })
- except Exception as ttsErr:
- logger.warning(f"Voice greeting TTS failed for session {sessionId}: {ttsErr}")
+ greetingText=greetingText,
+ greetingLang=requestedLang,
+ sendToChat=True,
+ interface=interface,
+ voiceInterface=voiceInterface,
+ websocket=websocket,
+ )
+ else:
+ logger.warning(
+ f"Session {sessionId}: Skipping greeting — AI generation produced no text"
+ )
elif msgType == "ping":
await websocket.send_text(json.dumps({"type": "pong"}))
@@ -516,6 +1113,19 @@ class TeamsbotService:
except Exception as e:
if "disconnect" not in str(e).lower():
logger.error(f"[WS] Error for session {sessionId}: {type(e).__name__}: {e}")
+ finally:
+ if _activeServices.get(sessionId) is self:
+ _activeServices.pop(sessionId, None)
+ self._websocket = None
+ self._voiceInterface = None
+ self._activeSessionId = None
+ try:
+ await _emitSessionEvent(sessionId, "botConnectionState", {
+ "connected": False,
+ "timestamp": getIsoTimestamp(),
+ })
+ except Exception:
+ pass
logger.info(f"[WS] Handler ended for session {sessionId} after {msgCount} messages")
@@ -723,6 +1333,12 @@ class TeamsbotService:
if isNew:
logger.info(f"Session {sessionId}: Bot name in caption, debounce trigger started")
asyncio.create_task(self._checkPendingNameTrigger())
+ # Fire a short audible "Moment..." in parallel so the
+ # speaker hears the bot react immediately, instead of
+ # waiting for debounce + SPEECH_TEAMS + agent (~5-30s).
+ self._currentQuickAckTask = asyncio.create_task(
+ self._runQuickAck(sessionId)
+ )
return
# Chat history: messages sent before the bot joined the meeting.
@@ -854,10 +1470,23 @@ class TeamsbotService:
if source == "chat" and isBotSpeaker:
return
- # Stop phrases: trigger immediately without debounce (root cause: 3s debounce delayed stop)
+ # Stop phrases: HARD STOP, no AI round-trip. We previously routed
+ # this through ``_analyzeAndRespond`` which spent 1-2 seconds in
+ # the speech LLM just to classify the intent, during which the
+ # current TTS kept playing — and the LLM round-trip would also
+ # produce yet another response that joined the queue. The new
+ # path goes straight to the browser bot's audio cancel and
+ # invalidates everything else in flight.
if self._isStopPhrase(text):
- logger.info(f"Session {sessionId}: Stop phrase detected, triggering analysis immediately")
- await self._analyzeAndRespond(sessionId, interface, voiceInterface, websocket, createdTranscript)
+ logger.info(
+ f"Session {sessionId}: Stop phrase detected ('{text.strip()[:60]}'), "
+ f"hard-cancelling in-flight speech immediately"
+ )
+ await self._cancelInFlightSpeech(
+ sessionId=sessionId,
+ websocket=websocket,
+ reason="userStopPhrase",
+ )
return
# Update activity for any pending debounced trigger
@@ -869,6 +1498,12 @@ class TeamsbotService:
isNew = self._setPendingNameTrigger(sessionId, interface, voiceInterface, websocket, createdTranscript)
if isNew:
asyncio.create_task(self._checkPendingNameTrigger())
+ # Audible early-feedback ack ("Moment...") in parallel — runs
+ # while we still wait the debounce window and SPEECH_TEAMS
+ # decides what to actually answer.
+ self._currentQuickAckTask = asyncio.create_task(
+ self._runQuickAck(sessionId)
+ )
return
# Follow-up window: after a bot response, trigger AI for any human speech
@@ -938,19 +1573,128 @@ class TeamsbotService:
return False
def _isStopPhrase(self, text: str) -> bool:
- """Check if text is a stop command (stop, halt, be quiet, etc.). Triggers immediate analysis."""
+ """Check if text is an immediate-cancel command from the meeting.
+
+ Recognised intents (any language we hear in practice):
+ * Hard stop: stop / stopp / halt / ruhe / stille / arrete / quiet / shut
+ * Pause / wait: warte / wait / moment / pause / hold (hold on)
+ * Silence: sei still / be quiet / shut up / aufhoeren / aufhören / silence
+ Hits trigger the direct stop pipeline in ``_cancelInFlightSpeech``:
+ kill TTS, invalidate pending generations, clear name-trigger debounce.
+ Critically: NO new AI call is fired — the user explicitly asked the
+ bot to be quiet, so the worst thing we could do is generate yet
+ another response on top of the one we just cancelled.
+ """
if not text or len(text.strip()) < 2:
return False
t = text.strip().lower()
words = [w.strip(".,!?:;\"'()[]") for w in t.split() if w.strip()]
wordSet = set(words)
- stopWords = {"stop", "stopp", "halt", "ruhe", "stille", "schweig", "arrete", "quiet", "shut"}
+ stopWords = {
+ # Hard-stop verbs
+ "stop", "stopp", "halt", "ruhe", "stille", "schweig",
+ "arrete", "quiet", "shut", "silence",
+ # Pause / wait verbs (still "be quiet now" semantics)
+ "warte", "wait", "moment", "pause",
+ }
if wordSet & stopWords:
return True
- if "sei still" in t or "be quiet" in t or "shut up" in t or "aufhoeren" in t or "aufhören" in t:
+ if (
+ "sei still" in t
+ or "be quiet" in t
+ or "shut up" in t
+ or "hold on" in t
+ or "aufhoeren" in t
+ or "aufhören" in t
+ ):
return True
return False
+ def _makeAnswerCancelHook(self) -> Callable[[], bool]:
+ """Capture the current ``_answerGenerationCounter`` and return a
+ zero-arg predicate that returns ``True`` once a hard stop (or any
+ future "supersede this answer" event) has bumped the counter.
+
+ Pass the returned predicate as ``isCancelled`` into
+ ``_speakTextChunked`` so a multi-chunk dispatch can bail out
+ between chunks instead of speaking a 30-second answer to the end.
+ """
+ snapshot = self._answerGenerationCounter
+ return lambda: self._answerGenerationCounter != snapshot
+
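+ # Usage sketch (illustrative; variable names are hypothetical): how the
+ # predicate cooperates with the chunked TTS dispatcher.
+ #
+ #     cancelled = self._makeAnswerCancelHook()  # snapshot of the counter
+ #     await _speakTextChunked(
+ #         websocket, voiceInterface, sessionId, voiceText,
+ #         languageCode, voiceName, isCancelled=cancelled,
+ #     )
+ #     # A stop phrase bumps _answerGenerationCounter; cancelled() flips to
+ #     # True and the loop drops remaining chunks at the next boundary.
+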
+ async def _cancelInFlightSpeech(
+ self,
+ sessionId: str,
+ websocket: Optional[WebSocket],
+ reason: str,
+ ) -> None:
+ """Hard stop everything the bot is currently doing in the meeting.
+
+ Pipeline (ALL synchronous from the caller's point of view, no AI
+ round-trips):
+
+ 1. Bump ``_answerGenerationCounter`` so any in-flight TTS chunk
+ loop, agent escalation or quick-ack drops its remaining work
+ the moment it next checks the counter.
+ 2. Clear ``_pendingNameTrigger`` so a debounced "speaker just said
+ the bot name" trigger that was queued before the stop word
+ cannot wake up 3 seconds later and answer anyway.
+ 3. Cancel tracked background tasks (escalation, quick-ack). The
+ tasks themselves swallow ``CancelledError`` in their finally
+ block.
+ 4. Send ``{"type":"stopAudio"}`` to the browser bot — it stops the
+ current playback in the AudioContext and clears its play queue
+ so nothing buffered comes through afterwards.
+
+ Deliberately does NOT generate a new response. The user just told
+ the bot to be quiet; producing a "Okay, ich bin still" reply on
+ top would be the exact opposite of what was asked for.
+ """
+ self._answerGenerationCounter += 1
+ gen = self._answerGenerationCounter
+ logger.info(
+ f"Session {sessionId}: Cancelling in-flight speech "
+ f"(reason={reason}, gen={gen})"
+ )
+
+ if self._pendingNameTrigger:
+ logger.info(
+ f"Session {sessionId}: Dropping pending debounced name "
+ f"trigger (was queued before stop)"
+ )
+ self._pendingNameTrigger = None
+
+ for taskAttr in ("_currentEscalationTask", "_currentQuickAckTask"):
+ task = getattr(self, taskAttr, None)
+ if task is not None and not task.done():
+ logger.info(
+ f"Session {sessionId}: Cancelling background task "
+ f"{taskAttr}"
+ )
+ task.cancel()
+
+ if websocket is not None:
+ try:
+ await websocket.send_text(json.dumps({
+ "type": "stopAudio",
+ "sessionId": sessionId,
+ "reason": reason,
+ }))
+ except Exception as stopErr:
+ logger.warning(
+ f"Session {sessionId}: Failed to send stopAudio to "
+ f"browser bot: {stopErr}"
+ )
+
+ try:
+ await _emitSessionEvent(sessionId, "speechCancelled", {
+ "reason": reason,
+ "generation": gen,
+ "timestamp": getIsoTimestamp(),
+ })
+ except Exception:
+ pass
+
def _detectBotName(self, text: str) -> bool:
"""Check if text contains the bot's name (exact or phonetically similar)."""
botNameLower = self.config.botName.lower()
@@ -990,6 +1734,376 @@ class TeamsbotService:
}
return True
+ async def _warmEphemeralPhrasePool(self, sessionId: str) -> None:
+ """Fire-and-forget background task: generate the ephemeral phrase
+ pool for every kind defined in ``_EPHEMERAL_PHRASE_INTENTS`` so the
+ first quick-ack / interim notice doesn't pay the AI round-trip
+ latency at runtime. Failures are logged but never raised — the
+ runtime selectors handle empty pools by silently skipping the cue."""
+ try:
+ for kind in _EPHEMERAL_PHRASE_INTENTS:
+ try:
+ await self._getEphemeralPhrases(kind)
+ except Exception as innerErr:
+ logger.warning(
+ f"Session {sessionId}: Phrase pool warmup failed for "
+ f"kind={kind}: {innerErr}"
+ )
+ except Exception as warmErr:
+ logger.warning(
+ f"Session {sessionId}: Phrase pool warmup task crashed: {warmErr}"
+ )
+
+ # ---------------------------------------------------------------- Voice
+ # When the bot's full answer is a long structured chat post (markdown
+ # tables, bullet lists, headings, multi-paragraph) we MUST NOT read it
+ # out verbatim into the meeting — even after sanitisation it sounds
+ # like a wall of text and easily takes 5+ minutes. The chat keeps the
+ # full answer; the audio path goes through ``_summarizeForVoice`` which
+ # asks the AI for a 1-3 sentence spoken paraphrase in the configured
+ # bot persona / language.
+
+ # Threshold: anything longer than this many characters (after sanitise)
+ # OR any answer whose source contains markdown structure (tables /
+ # multiple bullets / multiple headings) gets condensed before TTS.
+ _VOICE_DIRECT_MAX_CHARS = 600
+ _VOICE_SUMMARY_MAX_CHARS = 350
+
+ @staticmethod
+ def _looksLikeStructuredText(raw: str) -> bool:
+ """Heuristic: does the original answer have markdown structure that
+ would be miserable to listen to verbatim? Used to trigger the
+ AI summary path even when the sanitised text is short enough."""
+ if not raw:
+ return False
+ if raw.count("|") >= 4: # at least one markdown table row
+ return True
+ if raw.count("\n#") >= 1: # at least one heading after newline
+ return True
+ if raw.count("\n- ") + raw.count("\n* ") + raw.count("\n• ") >= 3:
+ return True # 3+ bullets → list-like
+ if re.search(r"\n\d+[\.\)]\s", raw): # numbered list
+ count = len(re.findall(r"(?m)^\s*\d+[\.\)]\s", raw))
+ if count >= 3:
+ return True
+ return False
+
+ async def _summarizeForVoice(
+ self,
+ sessionId: str,
+ rawAnswer: str,
+ ) -> str:
+ """Return a SHORT, naturally-spoken paraphrase of ``rawAnswer`` for
+ TTS playback. Falls back to the sanitised + truncated original if
+ the AI call fails — never blocks the response.
+
+ The chat / DB / UI keep the original ``rawAnswer`` untouched. Only
+ the voice channel goes through this condensation.
+ """
+ if not rawAnswer or not rawAnswer.strip():
+ return ""
+
+ sanitised = _voiceFriendlyMeetingText(rawAnswer)
+ # Short + unstructured → speak as-is, no AI round-trip
+ if (
+ len(sanitised) <= self._VOICE_DIRECT_MAX_CHARS
+ and not self._looksLikeStructuredText(rawAnswer)
+ ):
+ return sanitised
+
+ targetLang = (self.config.language or "de-DE").strip()
+ botName = (self.config.botName or "").strip() or "the assistant"
+ persona = (self.config.aiSystemPrompt or "").strip()
+ personaBlock = (
+ f"\n\nBOT PERSONA / TONE:\n{persona}\n"
+ if persona else ""
+ )
+
+ prompt = (
+ f"You are condensing a long written answer into a SHORT spoken "
+ f"paraphrase that the assistant '{botName}' will say out loud "
+ f"into a Microsoft Teams meeting. The full written answer is "
+ f"already in the meeting chat — your job is to summarise it for "
+ f"the EAR, not the eye.\n\n"
+ f"STRICT REQUIREMENTS:\n"
+ f"1. Output language: BCP-47 '{targetLang}'. No other language.\n"
+ f"2. 1 to 3 sentences, max ~{self._VOICE_SUMMARY_MAX_CHARS} characters total.\n"
+ f"3. Natural spoken style — no headings, no bullet points, no "
+ f"tables, no markdown, no emojis, no enumerations like 'Erstens... "
+ f"Zweitens...' unless that genuinely flows in speech.\n"
+ f"4. Capture the essence and the most important conclusion. Do "
+ f"NOT try to fit every detail. Listeners can read the chat for "
+ f"the full version.\n"
+ f"5. End by gently pointing the audience to the chat for details, "
+ f"e.g. 'Details stehen im Chat.' (adapted to the target language).\n"
+ f"6. Output ONLY the spoken text. No JSON, no quotes around it, "
+ f"no preamble like 'Here is the summary:'.\n"
+ f"{personaBlock}\n"
+ f"FULL WRITTEN ANSWER (markdown-formatted, sometimes long):\n"
+ f"---\n{rawAnswer.strip()[:6000]}\n---\n"
+ )
+
+ try:
+ aiService = _createAiService(
+ self.currentUser, self.mandateId, self.instanceId
+ )
+ await aiService.ensureAiObjectsInitialized()
+ request = AiCallRequest(
+ prompt=prompt,
+ context="",
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.SPEED,
+ ),
+ )
+ response = await aiService.callAi(request)
+ except Exception as aiErr:
+ logger.warning(
+ f"Session {sessionId}: Voice summary AI call failed: {aiErr}"
+ )
+ return sanitised[: self._VOICE_DIRECT_MAX_CHARS]
+
+ if not response or response.errorCount != 0 or not response.content:
+ logger.warning(
+ f"Session {sessionId}: Voice summary returned empty/error"
+ )
+ return sanitised[: self._VOICE_DIRECT_MAX_CHARS]
+
+ spoken = response.content.strip()
+ # Defensive sanitiser pass — the model usually obeys the
+ # "no markdown" instruction but not always.
+ spoken = _voiceFriendlyMeetingText(spoken)
+ if not spoken:
+ return sanitised[: self._VOICE_DIRECT_MAX_CHARS]
+
+ logger.info(
+ f"Session {sessionId}: Voice summary generated "
+ f"(orig={len(rawAnswer)} chars, sanitised={len(sanitised)}, "
+ f"spoken={len(spoken)})"
+ )
+ return spoken
+
+ async def _pickQuickAckText(self) -> Optional[str]:
+ """Return a short ack text in the bot's configured language. The
+ actual phrases are AI-generated once per session (cached) and rotated
+ round-robin so consecutive acks don't sound identical. Returns
+ ``None`` only if AI generation completely failed and no fallback
+ variant could be produced — in that case the caller silently skips
+ the ack."""
+ return await self._pickEphemeralPhrase("quickAck")
+
+ async def _pickEphemeralPhrase(
+ self,
+ kind: str,
+ substitutions: Optional[Dict[str, Any]] = None,
+ ) -> Optional[str]:
+ """Round-robin selector over the cached phrase pool for ``kind``.
+ Lazily generates the pool on first use. ``substitutions`` is applied
+ to the chosen phrase via ``str.format(**substitutions)`` so kinds
+ like ``agentRound`` can render ``{round}`` / ``{maxRounds}``.
+ Returns ``None`` if no phrases are available."""
+ variants = await self._getEphemeralPhrases(kind)
+ if not variants:
+ return None
+ idx = self._phrasePoolIdx.get(kind, 0) % len(variants)
+ self._phrasePoolIdx[kind] = (idx + 1) % len(variants)
+ chosen = variants[idx]
+ if substitutions:
+ try:
+ chosen = chosen.format(**substitutions)
+ except (KeyError, IndexError, ValueError) as fmtErr:
+ # The AI didn't include the expected placeholder — return the
+ # raw phrase rather than crash. The user still hears something
+ # in the right language; only the numeric hint is missing.
+ logger.debug(
+ f"Ephemeral phrase substitution failed for kind={kind}: {fmtErr}"
+ )
+ return chosen
+
+ async def _getEphemeralPhrases(self, kind: str) -> List[str]:
+ """Return the cached pool of AI-generated variants for ``kind``,
+ generating it on first request. Subsequent calls hit the in-memory
+ cache. Concurrent first-time callers are serialised by the pool lock
+ so only ONE AI request is fired per kind per session."""
+ cached = self._phrasePool.get(kind)
+ if cached:
+ return cached
+ async with self._phrasePoolLock:
+ cached = self._phrasePool.get(kind)
+ if cached:
+ return cached
+ phrases = await self._generateEphemeralPhrases(
+ kind, _EPHEMERAL_PHRASE_VARIANTS
+ )
+ if phrases:
+ self._phrasePool[kind] = phrases
+ return phrases
+
+ async def _generateEphemeralPhrases(
+ self, kind: str, count: int
+ ) -> List[str]:
+ """Ask the AI to produce ``count`` short utterances for ``kind`` in
+ the bot's configured language and persona. Returns ``[]`` on any
+ failure — callers must treat empty as 'silently skip this ephemeral
+ cue', NEVER fall back to a hardcoded localized string."""
+ intent = _EPHEMERAL_PHRASE_INTENTS.get(kind)
+ if not intent:
+ logger.warning(f"Unknown ephemeral phrase kind requested: {kind}")
+ return []
+
+ targetLang = (self.config.language or "").strip() or "en-US"
+ botName = (self.config.botName or "the assistant").strip()
+ persona = (self.config.aiSystemPrompt or "").strip()
+
+ # The prompt is in English on purpose — these are instructions to the
+ # LLM, not user-facing text. The OUTPUT is required to be in
+ # ``targetLang``. We ask for a strict JSON array so parsing is robust.
+ prompt = (
+ f"You are localizing short SPOKEN-LANGUAGE utterances for a "
+ f"meeting assistant named '{botName}'.\n\n"
+ f"Persona / style guide for the assistant:\n"
+ f"{persona or '(no persona configured — use a neutral, polite, professional tone)'}\n\n"
+ f"Target spoken language (BCP-47 code): {targetLang}\n\n"
+ f"Utterance intent:\n{intent}\n\n"
+ f"Generate {count} DIFFERENT variants matching this intent, in "
+ f"the target language. Variants should feel natural when spoken "
+ f"aloud, not robotic. Do NOT include the assistant's name in "
+ f"the variants.\n\n"
+ f"Output STRICTLY a JSON array of {count} plain-text strings, "
+ f"with no markdown fences, no commentary, no surrounding "
+ f"quotation marks beyond the JSON syntax itself. Example "
+ f"format: [\"...\", \"...\", \"...\", \"...\"]"
+ )
+
+ try:
+ aiService = _createAiService(
+ self.currentUser, self.mandateId, self.instanceId
+ )
+ await aiService.ensureAiObjectsInitialized()
+ request = AiCallRequest(
+ prompt=prompt,
+ context="",
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.SPEED,
+ ),
+ )
+ response = await aiService.callAi(request)
+ except Exception as aiErr:
+ logger.warning(
+ f"Ephemeral phrase generation failed (kind={kind}, lang={targetLang}): {aiErr}"
+ )
+ return []
+
+ if not response or response.errorCount != 0 or not response.content:
+ logger.warning(
+ f"Ephemeral phrase generation returned empty/error "
+ f"(kind={kind}, lang={targetLang})"
+ )
+ return []
+
+ raw = response.content.strip()
+ # Strip optional ```json ... ``` fences before parsing.
+ raw = re.sub(r"^```(?:json)?\s*", "", raw)
+ raw = re.sub(r"\s*```\s*$", "", raw)
+ try:
+ arr = json.loads(raw)
+ except json.JSONDecodeError as parseErr:
+ logger.warning(
+ f"Ephemeral phrase generation: could not parse JSON "
+ f"(kind={kind}, lang={targetLang}): {parseErr} "
+ f"raw={raw[:200]}"
+ )
+ return []
+ if not isinstance(arr, list):
+ return []
+ cleaned = [
+ str(v).strip()
+ for v in arr
+ if isinstance(v, str) and str(v).strip()
+ ]
+ cleaned = cleaned[:count]
+ if cleaned:
+ logger.info(
+ f"Ephemeral phrase pool generated (kind={kind}, "
+ f"lang={targetLang}, count={len(cleaned)})"
+ )
+ return cleaned
+
+ def _shouldFireQuickAck(self) -> bool:
+ """Centralized gate so the call sites stay short and consistent."""
+ now = time.time()
+ if (now - self._lastQuickAckTs) < _QUICK_ACK_MIN_INTERVAL_SEC:
+ return False
+ # If we are already producing a real response, the ack would step on
+ # the actual answer's TTS — skip it. Same for an in-flight agent
+ # escalation: the agent will deliver its own answer (and we already
+ # spoke an interim "moment please" when it started).
+ if self._aiAnalysisInProgress or self._agentEscalationInFlight:
+ return False
+ # Voice channel must be active. Chat-only mode would just spam "...".
+ channelRaw = self.config.responseChannel
+ channelStr = (
+ channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw)
+ ).lower().strip()
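+        # Normalization (illustrative): an enum member whose .value is "Voice"
+        # and a raw string "Both" end up as "voice" and "both" respectively.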
+ if channelStr not in ("voice", "both"):
+ return False
+ if self.config.responseMode in (
+ TeamsbotResponseMode.MANUAL,
+ TeamsbotResponseMode.TRANSCRIBE_ONLY,
+ ):
+ return False
+ return True
+
+ async def _runQuickAck(self, sessionId: str) -> None:
+ """Background task: speak the short ack into the meeting via TTS.
+
+ Designed to be fired as ``asyncio.create_task(self._runQuickAck(...))``
+ the moment the bot's name is detected — does not block the regular
+ debounced analysis pipeline. Persists nothing to the DB and emits no
+ botResponse event; this is purely an audio cue ("Moment...") so the
+ speaker hears within ~1s that the bot is reacting.
+ """
+ websocket = self._websocket
+ voiceInterface = self._voiceInterface
+ if websocket is None or voiceInterface is None:
+ return
+ if not self._shouldFireQuickAck():
+ return
+ ackText = await self._pickQuickAckText()
+ if not ackText:
+ return
+ # Mark the throttle BEFORE TTS so two near-simultaneous detections
+ # don't both fire (TTS dispatch can take a few hundred ms).
+ self._lastQuickAckTs = time.time()
+ try:
+ await _emitSessionEvent(sessionId, "quickAck", {
+ "text": ackText,
+ "timestamp": getIsoTimestamp(),
+ })
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ outcome = await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=ackText,
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
+ if not outcome.get("success"):
+ logger.info(
+ f"Session {sessionId}: Quick ack TTS failed silently "
+ f"({outcome.get('error')}) — main response will still go through"
+ )
+ except asyncio.CancelledError:
+ logger.info(f"Session {sessionId}: Quick ack cancelled by stop signal")
+ except Exception as ackErr:
+ logger.warning(f"Session {sessionId}: Quick ack failed: {ackErr}")
+ finally:
+ self._currentQuickAckTask = None
+
async def _checkPendingNameTrigger(self, delaySec: float = 3.0):
"""Async loop: fire the pending name trigger once the speaker is quiet."""
await asyncio.sleep(delaySec)
@@ -1032,6 +2146,19 @@ class TeamsbotService:
if self._aiAnalysisInProgress:
logger.info(f"Session {sessionId}: AI analysis already in progress, skipping duplicate trigger")
return
+ # An agent escalation from a previous trigger may still be researching
+ # (it lives in its own task, ``_aiAnalysisInProgress`` was already
+ # released when SPEECH_TEAMS returned). If we let a fresh SPEECH_TEAMS
+ # run now, both pipelines would race to the meeting voice channel and
+ # the operator would hear "two bots talking". Skip until the agent
+ # finishes; the speaker can re-trigger by saying the bot name again
+ # if they have a new question.
+ if self._agentEscalationInFlight:
+ logger.info(
+ f"Session {sessionId}: Agent escalation still in flight — "
+ f"skipping new SPEECH_TEAMS trigger to prevent overlapping replies"
+ )
+ return
self._aiAnalysisInProgress = True
self._lastAiCallTime = time.time()
@@ -1060,7 +2187,11 @@ class TeamsbotService:
if self._contextSummary:
summaryStr = f"\nEARLIER_CONVERSATION_SUMMARY:\n{self._contextSummary}\n"
- transcriptContext = f"BOT_NAME:{self.config.botName}{sessionContextStr}{summaryStr}\nRECENT_TRANSCRIPT:\n" + "\n".join(contextLines)
+ # Persistent director prompts: private operator instructions that stay
+ # in effect across triggers (e.g. "respond in English", "always be brief").
+ directorStr = self._buildPersistentDirectorContext()
+
+ transcriptContext = f"BOT_NAME:{self.config.botName}{sessionContextStr}{summaryStr}{directorStr}\nRECENT_TRANSCRIPT:\n" + "\n".join(contextLines)
# Call SPEECH_TEAMS
try:
@@ -1112,8 +2243,68 @@ class TeamsbotService:
"modelName": response.modelName,
"processingTime": response.processingTime,
"priceCHF": response.priceCHF,
+ "needsAgent": speechResult.needsAgent,
+ "agentReason": speechResult.agentReason,
})
+ # Hybrid routing: SPEECH_TEAMS detected a complex request that
+ # requires the full agent (web research, mail, multi-step). Hand
+ # off to the agent path; do NOT speak the SPEECH_TEAMS placeholder.
+ if speechResult.needsAgent:
+                # Director prompts (persistent + recent one-shot) may carry files
+                # the operator attached. The escalation agent MUST see
+                # them — otherwise it answers "summarize the doc" with generic
+                # babble because the SPEECH_TEAMS prompt itself never had file
+                # access. We also forward the prior agent analysis so the
+                # escalation can build on, not duplicate, the earlier work.
+ briefings = self._collectActiveDirectorBriefings()
+ briefingFileIds = self._collectDirectorFileIds()
+ briefingBlock = ""
+ if briefings:
+ parts = []
+ for b in briefings:
+ seg = f"- ({b.get('mode')}) {b.get('text', '')}".rstrip()
+ if b.get("fileIds"):
+ seg += f"\n attachedFileIds: {', '.join(b['fileIds'])}"
+ if b.get("note"):
+ note = b["note"]
+ seg += (
+ "\n priorAgentAnalysis: "
+ + (note if len(note) <= 800 else note[:800] + "...")
+ )
+ parts.append(seg)
+ briefingBlock = (
+ "\n\nACTIVE_OPERATOR_BRIEFINGS (private; you may read the "
+ "attached files via summarizeContent / readFile / "
+ "readContentObjects to answer the user precisely; do NOT "
+ "quote the directive text itself):\n" + "\n".join(parts)
+ )
+ logger.info(
+ f"Session {sessionId}: SPEECH_TEAMS escalates to agent. "
+ f"Reason: {speechResult.agentReason or speechResult.reasoning} | "
+ f"briefings={len(briefings)}, fileIds={len(briefingFileIds)}"
+ )
+ taskBrief = (
+ (speechResult.agentReason
+ or speechResult.responseText
+ or "Verarbeite die juengste Sprecheranfrage und antworte ins Meeting.")
+ + briefingBlock
+ )
+ # Mark escalation as in-flight BEFORE we create the task so the
+ # ``_aiAnalysisInProgress=False`` released in our finally block
+ # cannot let a competing speech trigger sneak past the gate
+ # before the agent task has even been scheduled.
+ self._agentEscalationInFlight = True
+ self._currentEscalationTask = asyncio.create_task(
+ self._runEscalationAndRelease(
+ sessionId=sessionId,
+ taskBrief=taskBrief,
+ briefingFileIds=briefingFileIds,
+ triggerTranscriptId=triggerTranscript.get("id"),
+ )
+ )
+ return
+
# Step 4a: Handle STOP intent -- stop audio immediately
if speechResult.detectedIntent == "stop":
logger.info(f"Session {sessionId}: AI detected STOP intent: {speechResult.reasoning}")
@@ -1190,70 +2381,69 @@ class TeamsbotService:
textForChat = speechResult.responseTextForChat or speechResult.responseText
storedText = textForChat or textForVoice or speechResult.responseText
- # 4a: Voice response (TTS -> Audio to bot)
+ # 4a: Voice response (TTS -> Audio to bot, chunked for long replies)
if sendVoice and textForVoice:
- try:
+ await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
+ "status": "requested",
+ "hasWebSocket": websocket is not None,
+ "message": "TTS generation requested",
+ "timestamp": getIsoTimestamp(),
+ })
+ logger.info(
+ f"Session {sessionId}: TTS requested (websocket_available={websocket is not None})"
+ )
+ if not websocket:
+ logger.warning(
+ f"Session {sessionId}: TTS skipped (bot websocket unavailable, likely fallback mode)"
+ )
await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
- "status": "requested",
- "hasWebSocket": websocket is not None,
- "message": "TTS generation requested",
+ "status": "unavailable",
+ "hasWebSocket": False,
+ "message": "TTS skipped — bot websocket unavailable",
"timestamp": getIsoTimestamp(),
})
- logger.info(
- f"Session {sessionId}: TTS requested (websocket_available={websocket is not None})"
- )
- ttsResult = await voiceInterface.textToSpeech(
- text=textForVoice,
- languageCode=self.config.language,
- voiceName=self.config.voiceId
- )
-
- if not ttsResult or not isinstance(ttsResult, dict):
- raise RuntimeError("TTS returned invalid result payload")
-
- if ttsResult.get("success") is False:
- raise RuntimeError(f"TTS backend error: {ttsResult.get('error', 'unknown')}")
-
- audioContent = ttsResult.get("audioContent")
- if not audioContent:
- raise RuntimeError("TTS returned no audioContent")
-
- if websocket:
- await websocket.send_text(json.dumps({
- "type": "playAudio",
- "sessionId": sessionId,
- "audio": {
- "data": base64.b64encode(audioContent if isinstance(audioContent, bytes) else audioContent.encode()).decode(),
- "format": "mp3",
- },
- }))
- logger.info(f"Session {sessionId}: TTS audio dispatched to bot")
+ if not sendChat:
+ sendChat = True
+ else:
+            # Long / structured answers → the AI condenses them for the ear; chat keeps the full text.
+ spokenText = await self._summarizeForVoice(sessionId, textForVoice)
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ ttsOutcome = await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=spokenText,
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
+ if ttsOutcome.get("success"):
+ logger.info(
+ f"Session {sessionId}: TTS audio dispatched to bot "
+ f"(chunks={ttsOutcome.get('chunks')}, played={ttsOutcome.get('played')})"
+ )
await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
"status": "dispatched",
"hasWebSocket": True,
- "message": "TTS audio dispatched to bot",
+ "chunks": ttsOutcome.get("chunks"),
+ "played": ttsOutcome.get("played"),
"timestamp": getIsoTimestamp(),
})
else:
logger.warning(
- f"Session {sessionId}: TTS audio generated but cannot be played (bot websocket unavailable, likely fallback mode)"
+ f"TTS failed for session {sessionId}: {ttsOutcome.get('error')}"
)
await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
- "status": "unavailable",
- "hasWebSocket": False,
- "message": "TTS audio generated but bot websocket unavailable",
+ "status": "failed",
+ "hasWebSocket": True,
+ "chunks": ttsOutcome.get("chunks"),
+ "played": ttsOutcome.get("played"),
+ "message": ttsOutcome.get("error"),
"timestamp": getIsoTimestamp(),
})
- except Exception as ttsErr:
- logger.warning(f"TTS failed for session {sessionId}: {ttsErr}")
- await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
- "status": "failed",
- "hasWebSocket": websocket is not None,
- "message": str(ttsErr),
- "timestamp": getIsoTimestamp(),
- })
- if not sendChat:
- sendChat = True # Fallback to chat if voice-only and TTS failed
+ if not sendChat:
+ sendChat = True # Fallback to chat if voice-only and TTS failed
# 4b: Chat response (send text message to meeting chat)
if sendChat and textForChat:
@@ -1399,6 +2589,41 @@ class TeamsbotService:
finally:
self._aiAnalysisInProgress = False
+ async def _runEscalationAndRelease(
+ self,
+ sessionId: str,
+ taskBrief: str,
+ briefingFileIds: List[str],
+ triggerTranscriptId: Optional[str],
+ ) -> None:
+ """Background wrapper for ``_runAgentForMeeting`` that holds the
+ ``_agentEscalationInFlight`` flag for the entire duration of the agent
+ run — not just for the moment we schedule the task. Without this
+ wrapper, ``_aiAnalysisInProgress`` would already be ``False`` while
+ the agent is still researching, and a fresh SPEECH_TEAMS trigger from
+ a new utterance would race the agent to the voice channel."""
+ try:
+ await self._runAgentForMeeting(
+ sessionId=sessionId,
+ taskText=taskBrief,
+ fileIds=briefingFileIds,
+ sourceLabel="speechEscalation",
+ triggerTranscriptId=triggerTranscriptId,
+ )
+ except asyncio.CancelledError:
+ logger.info(
+ f"Session {sessionId}: Escalation agent task cancelled by stop signal"
+ )
+ except Exception as escErr:
+ logger.error(
+ f"Session {sessionId}: Escalation agent task failed: "
+ f"{type(escErr).__name__}: {escErr}",
+ exc_info=True,
+ )
+ finally:
+ self._agentEscalationInFlight = False
+ self._currentEscalationTask = None
+
# =========================================================================
# AI Command Execution
# =========================================================================
@@ -1535,23 +2760,18 @@ class TeamsbotService:
if not summary:
summary = "Keine Chat-Nachrichten im angegebenen Zeitraum."
if voiceInterface and websocket:
- ttsResult = await voiceInterface.textToSpeech(
- text=summary[:2000],
- languageCode=self.config.language,
- voiceName=self.config.voiceId,
- )
- if ttsResult and isinstance(ttsResult, dict) and ttsResult.get("audioContent"):
- audioContent = ttsResult["audioContent"]
- await websocket.send_text(json.dumps({
- "type": "playAudio",
- "sessionId": sessionId,
- "audio": {
- "data": base64.b64encode(
- audioContent if isinstance(audioContent, bytes) else audioContent.encode()
- ).decode(),
- "format": "mp3",
- },
- }))
+ spokenSummary = await self._summarizeForVoice(sessionId, summary[:2000])
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=spokenSummary,
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
async def _cmdReadAloud(
self,
@@ -1562,25 +2782,18 @@ class TeamsbotService:
):
"""Read text aloud via TTS and play in meeting."""
readText = params.get("text", "")
- if readText and voiceInterface:
- ttsResult = await voiceInterface.textToSpeech(
- text=readText,
- languageCode=self.config.language,
- voiceName=self.config.voiceId,
- )
- if ttsResult and isinstance(ttsResult, dict):
- audioContent = ttsResult.get("audioContent")
- if audioContent and websocket:
- await websocket.send_text(json.dumps({
- "type": "playAudio",
- "sessionId": sessionId,
- "audio": {
- "data": base64.b64encode(
- audioContent if isinstance(audioContent, bytes) else audioContent.encode()
- ).decode(),
- "format": "mp3",
- },
- }))
+ if readText and voiceInterface and websocket:
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=_voiceFriendlyMeetingText(readText),
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
async def _cmdChangeLanguage(self, sessionId: str, params: dict):
"""Change bot language."""
@@ -1674,6 +2887,1041 @@ class TeamsbotService:
except Exception as e:
logger.warning(f"Session {sessionId}: storeDocument failed: {e}")
+ # =========================================================================
+ # Director Prompts (private operator instructions during a live meeting)
+ # =========================================================================
+
+ def _collectActiveDirectorBriefings(self) -> List[Dict[str, Any]]:
+ """Return the deduplicated list of director-prompt briefings that are
+ currently relevant for the meeting context: every active persistent
+ prompt PLUS every recent one-shot prompt that still sits in the
+ ``_recentDirectorBriefings`` pool. Each entry carries ``text``,
+ ``fileIds`` (UDB attachments), ``mode``, ``promptId`` and ``note``
+ (the agent's internal analysis from the SILENT director run, if any).
+ """
+ seen: Dict[str, Dict[str, Any]] = {}
+ for p in self._activePersistentPrompts:
+ pid = p.get("id") or ""
+ seen[pid] = {
+ "promptId": pid,
+ "mode": p.get("mode") or "persistent",
+ "text": (p.get("text") or "").strip(),
+ "fileIds": list(p.get("fileIds") or []),
+ "note": (p.get("responseText") or "").strip(),
+ }
+ for b in self._recentDirectorBriefings:
+ pid = b.get("promptId") or ""
+ if pid in seen:
+ # Refresh note with the latest analysis if the persistent run
+ # produced one after the prompt was first loaded from DB.
+ if b.get("note"):
+ seen[pid]["note"] = b["note"]
+ continue
+ seen[pid] = {
+ "promptId": pid,
+ "mode": b.get("mode") or "oneShot",
+ "text": (b.get("text") or "").strip(),
+ "fileIds": list(b.get("fileIds") or []),
+ "note": (b.get("note") or "").strip(),
+ }
+ return [v for v in seen.values() if v.get("text") or v.get("fileIds")]
+
+ def _collectDirectorFileIds(self) -> List[str]:
+ """Flat, deduplicated list of UDB file IDs attached to any currently
+ relevant director prompt (persistent + recent one-shot). Used when
+ SPEECH_TEAMS escalates to the agent so the agent can actually READ the
+ documents the operator already provided."""
+ out: List[str] = []
+ seen: set = set()
+ for b in self._collectActiveDirectorBriefings():
+ for fid in b.get("fileIds") or []:
+ if fid and fid not in seen:
+ seen.add(fid)
+ out.append(fid)
+ return out
+
+ def _buildPersistentDirectorContext(self) -> str:
+ """Render active director-prompt briefings as private operator guidance
+ for the SPEECH_TEAMS system prompt context block.
+
+ Surfaces three things SPEECH_TEAMS otherwise misses:
+
+ * the operator's directive text (as before)
+ * the IDs of any UDB files the operator attached — so SPEECH_TEAMS
+ knows the documents exist and can decide to escalate to the agent,
+ which has the tooling to read them.
+ * the agent's previous internal analysis of the prompt (the SILENT
+ ``MEETING_REPLY/SILENT`` decision's note), so SPEECH_TEAMS can answer
+ short questions without re-running the agent.
+ """
+ briefings = self._collectActiveDirectorBriefings()
+ if not briefings:
+ return ""
+ lines: List[str] = []
+ for b in briefings:
+ entry = f"- ({b.get('mode', 'persistent')}) {b.get('text', '')}".rstrip()
+ fileIds = b.get("fileIds") or []
+ if fileIds:
+ entry += (
+ "\n ATTACHED_FILES (operator-provided documents — the AGENT "
+ "has tools to read them via summarizeContent / readFile / "
+ "readContentObjects): "
+ + ", ".join(fileIds)
+ )
+ note = b.get("note")
+ if note:
+ noteShort = note if len(note) <= 600 else note[:600] + "..."
+ entry += f"\n AGENT_ANALYSIS (already computed by the bot): {noteShort}"
+ lines.append(entry)
+ return (
+ "\nOPERATOR_DIRECTIVES (private; never quote them verbatim, just follow them. "
+ "If the user asks about an attached document, use AGENT_ANALYSIS first; "
+ "if more depth is needed, set needsAgent=true so the agent can re-read the file):\n"
+ + "\n".join(lines)
+ + "\n"
+ )
+
+ def _recordDirectorBriefing(
+ self,
+ prompt: Dict[str, Any],
+ internalNote: str,
+ meetingText: str,
+ ) -> None:
+ """Append a director-prompt briefing to the session-scoped pool so the
+ attached files and the agent's analysis stay available for subsequent
+ SPEECH_TEAMS triggers — even after a one-shot prompt was consumed.
+ Idempotent per ``promptId`` (latest entry wins)."""
+ pid = prompt.get("id") or ""
+ # Drop any older entry for the same prompt so we keep the freshest note.
+ self._recentDirectorBriefings = [
+ b for b in self._recentDirectorBriefings if b.get("promptId") != pid
+ ]
+ self._recentDirectorBriefings.append({
+ "promptId": pid,
+ "mode": prompt.get("mode") or "oneShot",
+ "text": (prompt.get("text") or "").strip(),
+ "fileIds": list(prompt.get("fileIds") or []),
+ "note": (internalNote or meetingText or "").strip(),
+ "recordedAt": getIsoTimestamp(),
+ })
+ if len(self._recentDirectorBriefings) > _RECENT_DIRECTOR_BRIEFINGS_MAX:
+ self._recentDirectorBriefings = self._recentDirectorBriefings[
+ -_RECENT_DIRECTOR_BRIEFINGS_MAX:
+ ]
+
+ async def submitDirectorPrompt(
+ self,
+ sessionId: str,
+ operatorUserId: str,
+ text: str,
+ mode: TeamsbotDirectorPromptMode,
+ fileIds: List[str],
+ ) -> Dict[str, Any]:
+ """Persist a new director prompt and trigger immediate agent processing.
+
+ Returns the created prompt record. Processing happens asynchronously
+ and emits SSE events ('directorPrompt') for the operator UI.
+ """
+ from . import interfaceFeatureTeamsbot as interfaceDb
+
+ interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
+
+ promptData = TeamsbotDirectorPrompt(
+ sessionId=sessionId,
+ instanceId=self.instanceId,
+ operatorUserId=operatorUserId,
+ text=text,
+ mode=mode,
+ fileIds=fileIds or [],
+ status=TeamsbotDirectorPromptStatus.QUEUED,
+ ).model_dump()
+ created = interface.createDirectorPrompt(promptData)
+
+        # Persistent prompts join the in-memory directive pool immediately so
+        # they also influence subsequent SPEECH_TEAMS triggers, not only the
+        # one-shot agent run we kick off below.
+ if mode == TeamsbotDirectorPromptMode.PERSISTENT:
+ self._activePersistentPrompts.append(created)
+
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": created.get("id"),
+ "status": created.get("status"),
+ "mode": created.get("mode"),
+ "text": created.get("text"),
+ "fileIds": created.get("fileIds", []),
+ "createdAt": created.get("createdAt"),
+ })
+
+ asyncio.create_task(self._processDirectorPrompt(created))
+ return created
+
+ async def removePersistentPrompt(self, promptId: str) -> bool:
+ """Remove a persistent director prompt (operator clicked 'remove')."""
+ from . import interfaceFeatureTeamsbot as interfaceDb
+
+ interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
+ sessionId = self._activeSessionId
+ prompt = interface.getDirectorPrompt(promptId)
+ if not prompt:
+ return False
+ interface.updateDirectorPrompt(promptId, {
+ "status": TeamsbotDirectorPromptStatus.CONSUMED.value,
+ "consumedAt": getIsoTimestamp(),
+ "statusMessage": "Removed by operator",
+ })
+ self._activePersistentPrompts = [
+ p for p in self._activePersistentPrompts if p.get("id") != promptId
+ ]
+ # Also drop the briefing copy so SPEECH_TEAMS forgets the doc reference
+ # immediately; otherwise the bot would keep "remembering" a doc the
+ # operator just retired.
+ self._recentDirectorBriefings = [
+ b for b in self._recentDirectorBriefings if b.get("promptId") != promptId
+ ]
+ if sessionId:
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": promptId,
+ "status": TeamsbotDirectorPromptStatus.CONSUMED.value,
+ "mode": prompt.get("mode"),
+ "text": prompt.get("text"),
+ "removed": True,
+ })
+ return True
+
+ async def _processDirectorPrompt(self, prompt: Dict[str, Any]) -> None:
+ """Run the agent for a director prompt and deliver the FINAL text into
+ the meeting via TTS + chat (using the bot's existing channels)."""
+ from . import interfaceFeatureTeamsbot as interfaceDb
+
+ sessionId = prompt.get("sessionId")
+ promptId = prompt.get("id")
+ interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
+
+ interface.updateDirectorPrompt(promptId, {
+ "status": TeamsbotDirectorPromptStatus.RUNNING.value,
+ })
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": promptId,
+ "status": TeamsbotDirectorPromptStatus.RUNNING.value,
+ })
+
+ # Build a task brief for the agent that surfaces the meeting context.
+ recentTranscript = self._renderRecentTranscriptForAgent(maxLines=20)
+ directorText = (prompt.get("text") or "").strip()
+ attachedFileIds = list(prompt.get("fileIds") or [])
+ promptMode = (prompt.get("mode") or "").lower()
+ isPersistentPrompt = promptMode == TeamsbotDirectorPromptMode.PERSISTENT.value.lower()
+
+ # Make file attachment EXPLICIT in the brief. The agent service already
+ # prepends a "## Attached Files & Folders" header via _enrichPromptWithFiles
+ # when fileIds are passed, but without an explicit instruction the agent
+ # sometimes goes straight to a generic answer. We force the workflow:
+ # studyDocs -> form briefing -> decide MEETING_REPLY vs SILENT.
+ filesBlock = ""
+ if attachedFileIds:
+ filesBlock = (
+ "\nANGEHAENGTE DOKUMENTE (UDB-File-IDs): "
+ + ", ".join(attachedFileIds)
+ + "\nDu MUSST diese Dokumente VOR der finalen Antwort lesen / zusammenfassen "
+ "(z.B. summarizeContent, readFile, readContentObjects, describeImage). "
+ "Beziehe Fakten und Zitate aus den Dokumenten in deine Notiz / dein "
+ "Meeting-Reply ein, statt allgemein zu antworten.\n"
+ )
+
+ # Persistent prompts that ship documents are usually a "knowledge briefing"
+ # the operator wants the bot to STUDY now and USE LATER. The SILENT note
+ # in that case must be a useful, file-grounded summary that subsequent
+ # SPEECH_TEAMS triggers can pick up — not "noted".
+ persistentNoteHint = ""
+ if isPersistentPrompt and attachedFileIds:
+ persistentNoteHint = (
+ "\nSPEZIAL fuer PERSISTENT + Dokumente: Wenn die Anweisung KEIN explizites "
+ "Meeting-Statement verlangt, antworte mit 'SILENT:' und liefere als interne "
+ "Notiz eine STRUKTURIERTE, faktendichte Briefing-Zusammenfassung der Dokumente "
+ "(Stichpunkte, Kennzahlen, Aussagen, die fuer Folgefragen im Meeting relevant "
+ "sein koennen). Diese Notiz wird spaeteren Meeting-Antworten als Wissensbasis "
+ "vorgelegt — schreibe sie also so, dass der Bot daraus zitieren kann.\n"
+ )
+
+ taskText = (
+ f"Du bist der KI-Assistent in einem laufenden Teams-Meeting (Bot-Name: {self.config.botName}).\n"
+ f"Der Operator hat dir folgende PRIVATE Regieanweisung gegeben (die anderen Teilnehmer im "
+ f"Meeting sehen sie NICHT). Sie ist KEINE Frage an das Meeting, sondern eine interne "
+ f"Anweisung an dich:\n\n"
+ f"{directorText}\n"
+ f"{filesBlock}"
+ f"{persistentNoteHint}\n"
+ f"AKTUELLER MEETING-KONTEXT (juengste Aussagen):\n{recentTranscript}\n\n"
+ "ANTWORT-PROTOKOLL — Beginne deine FINALE Antwort mit GENAU EINEM dieser Marker:\n"
+ " • 'MEETING_REPLY:' gefolgt vom Text, der im Meeting gesprochen / in den Meeting-Chat "
+ "gepostet werden soll. Verwende diesen Marker NUR, wenn die Regieanweisung dich explizit "
+ "auffordert, jetzt etwas im Meeting zu sagen oder zu schreiben (Beispiele: 'stell dich vor', "
+ "'fasse zusammen', 'stelle Person X eine Frage', 'beantworte die letzte Frage'). Halte den "
+ "Text kurz, sprachlich passend zur Stimme und ohne Marker oder Meta-Kommentare.\n"
+ " • 'SILENT:' gefolgt von einer internen Notiz fuer das Operator-UI. "
+ "Verwende diesen Marker fuer interne Direktiven und Wissens-Briefings (Beispiele: "
+ "'achte ab jetzt auf X', 'merke dir Y', 'studiere Dokument Z'). "
+ "Dieser Text wird NICHT ins Meeting gegeben, dient aber spaeteren Meeting-Antworten "
+ "als Wissensbasis. Wenn Dokumente angehaengt sind, MUSS die Notiz konkrete, "
+ "zitierfaehige Fakten aus den Dokumenten enthalten.\n\n"
+ "Standard ist SILENT, wenn nicht eindeutig zur Meeting-Interaktion aufgefordert wurde. "
+ "Wiederhole NIEMALS die Regieanweisung selbst im MEETING_REPLY-Text."
+ )
+
+ try:
+ finalText = await self._runAgentForMeeting(
+ sessionId=sessionId,
+ taskText=taskText,
+ fileIds=attachedFileIds,
+ sourceLabel="directorPrompt",
+ triggerTranscriptId=None,
+ promptId=promptId,
+ directorPromptMode=True,
+ )
+
+ # One-shot: mark consumed; persistent: keep active but record success.
+ isPersistent = prompt.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value
+ updates: Dict[str, Any] = {
+ "status": TeamsbotDirectorPromptStatus.SUCCEEDED.value,
+ "responseText": finalText or "",
+ }
+ if not isPersistent:
+ updates["status"] = TeamsbotDirectorPromptStatus.CONSUMED.value
+ updates["consumedAt"] = getIsoTimestamp()
+ interface.updateDirectorPrompt(promptId, updates)
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": promptId,
+ "status": updates["status"],
+ "responseText": finalText,
+ })
+
+ except Exception as e:
+ logger.error(
+ f"Session {sessionId}: Director prompt {promptId} failed: {type(e).__name__}: {e}",
+ exc_info=True,
+ )
+ interface.updateDirectorPrompt(promptId, {
+ "status": TeamsbotDirectorPromptStatus.FAILED.value,
+ "statusMessage": f"{type(e).__name__}: {str(e)[:300]}",
+ })
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": promptId,
+ "status": TeamsbotDirectorPromptStatus.FAILED.value,
+ "error": f"{type(e).__name__}: {str(e)[:300]}",
+ })
+ self._activePersistentPrompts = [
+ p for p in self._activePersistentPrompts if p.get("id") != promptId
+ ]
+
+ def _renderRecentTranscriptForAgent(self, maxLines: int = 20) -> str:
+ """Render the most recent context buffer entries for inclusion in the
+ agent task brief (similar to SPEECH_TEAMS context, but plain text)."""
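+        # Output shape (speaker names purely illustrative):
+        #   [Anna Muster]: Koennen wir den Termin verschieben?
+        #   [Chat: Ben]: Passt mir auch.
+        #   [YOU (<botName>)]: Gerne, ich pruefe das.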
+ if not self._contextBuffer:
+ return "(noch keine Aussagen erfasst)"
+ recent = self._contextBuffer[-maxLines:]
+ lines = []
+ for seg in recent:
+ speaker = seg.get("speaker", "Unknown")
+ text = seg.get("text", "")
+ segSource = seg.get("source", "caption")
+ prefix = "Chat: " if segSource == "chat" else ""
+ if self._isBotSpeaker(speaker):
+ lines.append(f"[YOU ({self.config.botName})]: {text}")
+ else:
+ lines.append(f"[{prefix}{speaker}]: {text}")
+ return "\n".join(lines)
+
+ async def _interimAgentBusyMessage(self) -> Optional[str]:
+ """Short spoken/chat line before a potentially long agent run (web,
+ tools). Phrasing is AI-localised to ``self.config.language`` and
+ cached per session — no hardcoded language branching. Returns
+ ``None`` if generation failed; caller must treat that as
+ 'silently skip the interim notice'."""
+ return await self._pickEphemeralPhrase("agentBusy")
+
+ async def _interimAgentRoundMessage(
+ self, roundNum: int, maxRounds: int
+ ) -> Optional[str]:
+ """Per-round progress notice for long agent runs (meeting voice /
+ chat, ephemeral). Phrasing is AI-localised once per session;
+ ``{round}`` and ``{maxRounds}`` placeholders are substituted at
+ render time. Returns ``None`` if generation failed."""
+ return await self._pickEphemeralPhrase(
+ "agentRound",
+ substitutions={"round": roundNum, "maxRounds": maxRounds},
+ )
+
+ async def _notifyMeetingEphemeral(self, sessionId: str, text: str) -> None:
+ """Deliver a short line to the meeting (TTS + chat per config) without
+ persisting botResponses/transcripts, so the main agent answer stays the
+ single recorded follow-up."""
+ websocket = self._websocket
+ voiceInterface = self._voiceInterface
+ if not websocket:
+ logger.warning(f"Session {sessionId}: Interim notice skipped — no WebSocket")
+ return
+
+ channelRaw = self.config.responseChannel
+ channelStr = (
+ channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw)
+ ).lower().strip()
+ sendVoice = channelStr in ("voice", "both")
+ sendChat = channelStr in ("chat", "both")
+
+ if sendVoice and voiceInterface:
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ outcome = await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=_voiceFriendlyMeetingText(text),
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
+ if not outcome.get("success"):
+ logger.warning(
+ f"Session {sessionId}: Interim TTS failed ({outcome.get('error')}) — falling back to chat"
+ )
+ if not sendChat:
+ sendChat = True
+
+ if sendChat:
+ try:
+ await websocket.send_text(json.dumps({
+ "type": "sendChatMessage",
+ "sessionId": sessionId,
+ "text": text,
+ }))
+ except Exception as chatErr:
+ logger.warning(f"Session {sessionId}: Interim chat failed: {chatErr}")
+
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "status": "interimNotice",
+ "message": text,
+ "timestamp": getIsoTimestamp(),
+ })
+
+ async def _runAgentForMeeting(
+ self,
+ sessionId: str,
+ taskText: str,
+ fileIds: List[str],
+ sourceLabel: str,
+ triggerTranscriptId: Optional[str] = None,
+ promptId: Optional[str] = None,
+ directorPromptMode: bool = False,
+ ) -> str:
+ """Run agentService.runAgent for a meeting context, deliver the FINAL
+ text via the bot's existing TTS + chat channels, and return that text.
+
+ sourceLabel is used for logging and SSE differentiation
+ ('directorPrompt' or 'speechEscalation').
+
+ ``directorPromptMode`` activates the silent-by-default protocol for
+ operator director prompts: interim notices are suppressed, no per-round
+ meeting updates, and the FINAL text is parsed for an explicit
+ ``MEETING_REPLY:`` / ``SILENT:`` marker. Only ``MEETING_REPLY`` content
+ is dispatched to the meeting; everything else stays internal.
+ """
+ from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
+ AgentConfig, AgentEventTypeEnum
+ )
+
+ ctx = ServiceCenterContext(
+ user=self.currentUser,
+ mandate_id=self.mandateId,
+ feature_instance_id=self.instanceId,
+ feature_code="teamsbot",
+ )
+ agentService = _getServiceCenterService("agent", ctx)
+
+ # Workflow id stable per session so RAG/round-memory accumulate per meeting.
+ workflowId = f"teamsbot:{sessionId}"
+
+ agentConfig = AgentConfig(
+ maxRounds=TEAMSBOT_AGENT_MAX_ROUNDS,
+ maxCostCHF=TEAMSBOT_AGENT_MAX_COST_CHF,
+ toolSet="core",
+ initialToolboxes=["core", "web"],
+ excludeActionTools=True,
+ )
+
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "source": sourceLabel,
+ "promptId": promptId,
+ "status": "started",
+ "timestamp": getIsoTimestamp(),
+ })
+
+ # Director prompts run silently by default — no spontaneous "moment please"
+ # in the meeting just because the operator gave an internal directive.
+ if not directorPromptMode:
+ try:
+ interimText = await self._interimAgentBusyMessage()
+ if interimText:
+ await self._notifyMeetingEphemeral(sessionId, interimText)
+ except Exception as interimErr:
+ logger.warning(f"Session {sessionId}: Interim agent notice failed: {interimErr}")
+
+ finalText: str = ""
+ rounds = 0
+ try:
+ async for event in agentService.runAgent(
+ prompt=taskText,
+ fileIds=fileIds or None,
+ config=agentConfig,
+ toolSet="core",
+ workflowId=workflowId,
+ ):
+ if event.type == AgentEventTypeEnum.AGENT_PROGRESS:
+ rounds += 1
+ pdata = event.data or {}
+ roundNum = int(pdata.get("round", rounds))
+ maxR = int(pdata.get("maxRounds", TEAMSBOT_AGENT_MAX_ROUNDS))
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "source": sourceLabel,
+ "promptId": promptId,
+ "status": "progress",
+ "round": roundNum,
+ "maxRounds": maxR,
+ })
+                    # Round 1 is already covered by the generic start notice; from
+                    # round 2 onwards we report progress into the meeting.
+                    # Director prompts stay silent: no interim updates into the meeting.
+ if roundNum >= 2 and not directorPromptMode:
+ try:
+ roundText = await self._interimAgentRoundMessage(roundNum, maxR)
+ if roundText:
+ await self._notifyMeetingEphemeral(sessionId, roundText)
+ except Exception as roundNoticeErr:
+ logger.warning(
+ f"Session {sessionId}: Per-round agent notice failed: {roundNoticeErr}"
+ )
+ elif event.type == AgentEventTypeEnum.TOOL_CALL:
+ toolName = (event.data or {}).get("toolName") if event.data else None
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "source": sourceLabel,
+ "promptId": promptId,
+ "status": "toolCall",
+ "toolName": toolName,
+ })
+ elif event.type == AgentEventTypeEnum.FINAL:
+ finalText = (event.content or "").strip()
+ elif event.type == AgentEventTypeEnum.ERROR:
+ raise RuntimeError(event.content or "Agent error")
+ except Exception as runErr:
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "source": sourceLabel,
+ "promptId": promptId,
+ "status": "error",
+ "error": str(runErr)[:500],
+ })
+ raise
+
+ await _emitSessionEvent(sessionId, "agentRun", {
+ "source": sourceLabel,
+ "promptId": promptId,
+ "status": "completed",
+ "rounds": rounds,
+ "hasText": bool(finalText),
+ })
+
+ if finalText:
+ if directorPromptMode:
+ decision = _parseDirectorPromptFinal(finalText)
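+                # Expected mapping (illustrative; the parser is defined elsewhere
+                # in this module): "MEETING_REPLY: Gerne ..." yields kind="meeting"
+                # plus meetingText; "SILENT: Fakten ..." yields kind="silent" plus
+                # internalNote; a missing marker falls back to "silent" via the
+                # .get default below.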
+ kind = decision.get("kind", "silent")
+ meetingText = (decision.get("meetingText") or "").strip()
+ internalNote = (decision.get("internalNote") or "").strip()
+
+ logger.info(
+ f"Session {sessionId}: Director prompt {promptId} -> kind={kind}, "
+ f"meetingChars={len(meetingText)}, noteChars={len(internalNote)}"
+ )
+
+ await _emitSessionEvent(sessionId, "directorPrompt", {
+ "id": promptId,
+ "status": "decision",
+ "decision": kind,
+ "meetingText": meetingText,
+ "internalNote": internalNote,
+ })
+
+ # Record this prompt as a session-scoped briefing BEFORE we hand
+ # delivery off. This is what later SPEECH_TEAMS triggers see, so
+ # if the user attached a doc with mode=PERSISTENT and the agent
+ # produced a file-grounded SILENT note, that note (and the
+ # original fileIds) stays available for "summarize the doc"
+ # follow-up questions in the meeting.
+ try:
+ promptRecord: Dict[str, Any] = {}
+ if promptId:
+ try:
+ from . import interfaceFeatureTeamsbot as _ifaceDb
+ _iface = _ifaceDb.getInterface(
+ self.currentUser, self.mandateId, self.instanceId
+ )
+ promptRecord = _iface.getDirectorPrompt(promptId) or {}
+ except Exception as _lookupErr:
+ logger.debug(
+ f"Briefing pool: could not look up prompt {promptId}: {_lookupErr}"
+ )
+ if promptRecord or promptId:
+ self._recordDirectorBriefing(
+ prompt=promptRecord or {"id": promptId},
+ internalNote=internalNote,
+ meetingText=meetingText,
+ )
+ except Exception as briefErr:
+ logger.warning(
+ f"Session {sessionId}: Director briefing pool update failed: {briefErr}"
+ )
+
+ # If this was a persistent prompt, the live in-memory copy in
+ # ``_activePersistentPrompts`` was loaded BEFORE the agent ran
+ # — refresh its ``responseText`` so subsequent
+ # ``_collectActiveDirectorBriefings`` calls show the latest
+ # analysis without waiting for the next session reload.
+ if promptId:
+ for p in self._activePersistentPrompts:
+ if p.get("id") == promptId:
+ p["responseText"] = internalNote or meetingText or finalText
+ break
+
+ if kind == "meeting" and meetingText:
+ await self._deliverTextToMeeting(
+ sessionId=sessionId,
+ text=meetingText,
+ detectedIntent=f"agent:{sourceLabel}",
+ reasoning=f"Agent run ({sourceLabel})",
+ triggerTranscriptId=triggerTranscriptId,
+ )
+ else:
+ # Silent: persist as internal-only botResponse so the operator
+ # UI keeps a record, but DO NOT push into the meeting (no TTS,
+ # no chat send). The director prompt SSE above already carries
+ # the note for the operator UI.
+ await self._persistInternalDirectorReply(
+ sessionId=sessionId,
+ internalNote=internalNote or finalText,
+ promptId=promptId,
+ triggerTranscriptId=triggerTranscriptId,
+ )
+ return meetingText if kind == "meeting" else ""
+
+ await self._deliverTextToMeeting(
+ sessionId=sessionId,
+ text=finalText,
+ detectedIntent=f"agent:{sourceLabel}",
+ reasoning=f"Agent run ({sourceLabel})",
+ triggerTranscriptId=triggerTranscriptId,
+ )
+
+ return finalText
+
+ async def _deliverTextToMeeting(
+ self,
+ sessionId: str,
+ text: str,
+ detectedIntent: str,
+ reasoning: str,
+ triggerTranscriptId: Optional[str] = None,
+ ) -> None:
+ """Send agent text into the meeting via the same channels SPEECH_TEAMS
+ uses: TTS + chat per config, plus DB persistence and SSE events.
+
+ Uses the websocket/voiceInterface stored on this instance. If the bot
+ is not connected anymore, the call still records the response in the DB
+ and emits SSE so the operator UI shows the agent answer.
+ """
+ from . import interfaceFeatureTeamsbot as interfaceDb
+ interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
+
+ websocket = self._websocket
+ voiceInterface = self._voiceInterface
+
+ channelRaw = self.config.responseChannel
+ channelStr = (
+ channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw)
+ ).lower().strip()
+ sendVoice = channelStr in ("voice", "both")
+ sendChat = channelStr in ("chat", "both")
+
+ if sendVoice and sendChat:
+ responseType = TeamsbotResponseType.BOTH
+ elif sendVoice:
+ responseType = TeamsbotResponseType.AUDIO
+ else:
+ responseType = TeamsbotResponseType.CHAT
+
+ # Voice (TTS input is voice-sanitized; chat + DB keep full structured text).
+ # Long agent answers must be chunked: Google TTS rejects single sentences
+ # > ~5000 bytes, and the Chirp3 voices fail on long comma-heavy lines too.
+ ttsOutcome: Optional[Dict[str, Any]] = None
+ if sendVoice and voiceInterface and websocket:
+ spokenText = await self._summarizeForVoice(sessionId, text)
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ ttsOutcome = await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=spokenText,
+ languageCode=self.config.language,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
+ await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
+ "status": "dispatched" if ttsOutcome.get("success") else "failed",
+ "hasWebSocket": True,
+ "chunks": ttsOutcome.get("chunks"),
+ "played": ttsOutcome.get("played"),
+ "error": ttsOutcome.get("error"),
+ "timestamp": getIsoTimestamp(),
+ })
+ if not ttsOutcome.get("success"):
+ logger.warning(
+ f"Session {sessionId}: Agent TTS delivery failed "
+ f"({ttsOutcome.get('error')}) — falling back to meeting chat"
+ )
+ if not sendChat:
+ sendChat = True
+
+ # Chat
+ if sendChat and websocket:
+ try:
+ await websocket.send_text(json.dumps({
+ "type": "sendChatMessage",
+ "sessionId": sessionId,
+ "text": text,
+ }))
+ logger.info(f"Session {sessionId}: Agent chat dispatched ({len(text)} chars)")
+ except Exception as chatErr:
+ logger.warning(f"Session {sessionId}: Agent chat delivery failed: {chatErr}")
+
+ # Persist as botResponse + transcript so it shows up in history/UI.
+ intentEnum, intentMeta = _coercePersistedDetectedIntent(detectedIntent)
+ reasoningForDb = (
+ f"{reasoning} [{intentMeta}]" if intentMeta else reasoning
+ )
+
+ botResponseData = TeamsbotBotResponse(
+ sessionId=sessionId,
+ responseText=text,
+ responseType=responseType,
+ detectedIntent=intentEnum,
+ reasoning=reasoningForDb,
+ triggeredByTranscriptId=triggerTranscriptId,
+ modelName="agent",
+ processingTime=0.0,
+ priceCHF=0.0,
+ timestamp=getIsoTimestamp(),
+ ).model_dump()
+ createdResponse = interface.createBotResponse(botResponseData)
+
+ await _emitSessionEvent(sessionId, "botResponse", {
+ "id": createdResponse.get("id"),
+ "responseText": text,
+ "responseType": responseType.value,
+ "detectedIntent": intentEnum.value,
+ "reasoning": reasoningForDb,
+ "modelName": "agent",
+ "processingTime": 0.0,
+ "priceCHF": 0.0,
+ "timestamp": botResponseData.get("timestamp"),
+ })
+
+ botTranscriptData = TeamsbotTranscript(
+ sessionId=sessionId,
+ speaker=self.config.botName,
+ text=text,
+ timestamp=getIsoTimestamp(),
+ confidence=1.0,
+ language=self.config.language,
+ isFinal=True,
+ source="botResponse",
+ ).model_dump()
+ botTranscript = interface.createTranscript(botTranscriptData)
+
+ self._contextBuffer.append({
+ "speaker": self.config.botName,
+ "text": text,
+ "timestamp": getUtcTimestamp(),
+ "source": "botResponse",
+ })
+ self._lastTranscriptSpeaker = self.config.botName
+ self._lastTranscriptText = text
+ self._lastTranscriptId = botTranscript.get("id")
+ self._lastBotResponseText = text.strip().lower()
+ self._lastBotResponseTs = time.time()
+ self._followUpWindowEnd = time.time() + 15.0
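+        # Assumption from the field name: a ~15s grace window during which
+        # follow-up utterances can address the bot without naming it again.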
+
+ await _emitSessionEvent(sessionId, "transcript", {
+ "id": botTranscript.get("id"),
+ "speaker": self.config.botName,
+ "text": text,
+ "confidence": 1.0,
+ "timestamp": getIsoTimestamp(),
+ "isContinuation": False,
+ "source": "botResponse",
+ "speakerResolvedFromHint": False,
+ })
+
+ session = interface.getSession(sessionId)
+ if session:
+ count = session.get("botResponseCount", 0) + 1
+ interface.updateSession(sessionId, {"botResponseCount": count})
+
+ async def _persistInternalDirectorReply(
+ self,
+ sessionId: str,
+ internalNote: str,
+ promptId: Optional[str],
+ triggerTranscriptId: Optional[str] = None,
+ ) -> None:
+ """Record a director-prompt agent reply as INTERNAL (operator-UI only).
+
+ Unlike ``_deliverTextToMeeting`` this never dispatches TTS or chat into
+ the meeting, never appends to the meeting context buffer, and does not
+ create a meeting transcript line. It only persists a botResponse and
+ emits an SSE event so the operator UI shows what the agent decided.
+ """
+ from . import interfaceFeatureTeamsbot as interfaceDb
+
+ note = (internalNote or "").strip()
+ if not note:
+ return
+
+ interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
+
+ intentEnum, _intentMeta = _coercePersistedDetectedIntent("agent:directorPrompt")
+ reasoningForDb = (
+ f"Director prompt {promptId or ''} — silent / internal only "
+ f"(not sent to meeting)"
+ ).strip()
+
+ botResponseData = TeamsbotBotResponse(
+ sessionId=sessionId,
+ responseText=note,
+ responseType=TeamsbotResponseType.CHAT,
+ detectedIntent=intentEnum,
+ reasoning=reasoningForDb,
+ triggeredByTranscriptId=triggerTranscriptId,
+ modelName="agent",
+ processingTime=0.0,
+ priceCHF=0.0,
+ timestamp=getIsoTimestamp(),
+ ).model_dump()
+ createdResponse = interface.createBotResponse(botResponseData)
+
+ await _emitSessionEvent(sessionId, "botResponse", {
+ "id": createdResponse.get("id"),
+ "responseText": note,
+ "responseType": TeamsbotResponseType.CHAT.value,
+ "detectedIntent": intentEnum.value,
+ "reasoning": reasoningForDb,
+ "modelName": "agent",
+ "processingTime": 0.0,
+ "priceCHF": 0.0,
+ "timestamp": botResponseData.get("timestamp"),
+ "internalOnly": True,
+ "promptId": promptId,
+ })
+
+ logger.info(
+ f"Session {sessionId}: Director prompt {promptId} silent reply "
+ f"persisted internally ({len(note)} chars)"
+ )
+
+ # =========================================================================
+ # Greeting (AI-localised, no hardcoded language strings)
+ # =========================================================================
+
+ async def _generateGreetingText(self, languageCode: str) -> str:
+ """Generate the bot's join greeting via AI in ``languageCode`` and the
+ configured persona. Returns empty string on failure — the caller must
+ treat that as 'skip the greeting' (NEVER fall back to a hardcoded
+ localised string)."""
+ targetLang = (languageCode or self.config.language or "").strip() or "en-US"
+ botName = (self.config.botName or "the assistant").strip()
+ firstName = botName.split(" ")[0] if botName else botName
+ persona = (self.config.aiSystemPrompt or "").strip()
+
+ # English instructions to the LLM; the OUTPUT must be in ``targetLang``.
+ prompt = (
+ f"You are localizing the join greeting for a meeting assistant.\n\n"
+ f"Assistant display name (use exactly this, no translation): {firstName}\n\n"
+ f"Persona / style guide for the assistant:\n"
+ f"{persona or '(no persona configured — use a neutral, polite, professional tone)'}\n\n"
+ f"Target spoken language (BCP-47 code): {targetLang}\n\n"
+ f"Generate ONE short greeting (max ~14 words) for the assistant "
+ f"to say AND post in chat the moment it joins a meeting. The "
+ f"greeting MUST:\n"
+ f" - be in the target language\n"
+ f" - introduce the assistant by name ({firstName})\n"
+ f" - signal that it is now present and ready\n"
+ f" - sound natural when spoken aloud (this text is also TTS'd)\n\n"
+ f"Output ONLY the greeting text, no quotes, no markdown, no "
+ f"commentary, no surrounding punctuation beyond what naturally "
+ f"belongs to the sentence."
+ )
+
+ try:
+ aiService = _createAiService(
+ self.currentUser, self.mandateId, self.instanceId
+ )
+ await aiService.ensureAiObjectsInitialized()
+ request = AiCallRequest(
+ prompt=prompt,
+ context="",
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.SPEED,
+ ),
+ )
+ response = await aiService.callAi(request)
+ except Exception as aiErr:
+ logger.warning(
+ f"Greeting generation crashed (lang={targetLang}): {aiErr}"
+ )
+ return ""
+
+ if not response or response.errorCount != 0 or not response.content:
+ logger.warning(
+ f"Greeting generation returned empty/error (lang={targetLang})"
+ )
+ return ""
+
+ text = response.content.strip()
+ # Strip any wrapping quotes/code fences the model might have added.
+ text = re.sub(r"^```.*?\n", "", text, flags=re.DOTALL)
+ text = re.sub(r"\n```\s*$", "", text)
+ text = text.strip().strip("\"'`").strip()
+ if not text:
+ return ""
+ logger.info(
+ f"Greeting generated (lang={targetLang}, chars={len(text)}): {text[:80]}"
+ )
+ return text
+
+ async def _dispatchGreetingToMeeting(
+ self,
+ sessionId: str,
+ greetingText: str,
+ greetingLang: str,
+ sendToChat: bool,
+ interface: Any,
+ voiceInterface: Any,
+ websocket: WebSocket,
+ ) -> None:
+ """Centralised dispatcher for the bot's join greeting: speaks the
+ text via TTS into the meeting and (optionally) tells the bot to post
+ it in the meeting chat. Persists the greeting as a bot transcript /
+ botResponse so it appears in the operator UI history.
+
+ ``sendToChat`` is ``False`` for the legacy ``voiceGreeting`` path
+        (the bot already posts the chat message itself) and ``True`` for the new
+ ``requestGreeting`` path where the Gateway owns chat dispatch too.
+ """
+ try:
+ await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
+ "status": "requested",
+ "hasWebSocket": True,
+ "message": "Greeting TTS requested",
+ "timestamp": getIsoTimestamp(),
+ })
+ cancelHook = self._makeAnswerCancelHook()
+ async with self._meetingTtsLock:
+ ttsOutcome = await _speakTextChunked(
+ websocket=websocket,
+ voiceInterface=voiceInterface,
+ sessionId=sessionId,
+ voiceText=_voiceFriendlyMeetingText(greetingText),
+ languageCode=greetingLang,
+ voiceName=self.config.voiceId,
+ isCancelled=cancelHook,
+ )
+ if ttsOutcome.get("success"):
+ logger.info(
+ f"Greeting TTS sent for session {sessionId} "
+ f"(chunks={ttsOutcome.get('chunks')})"
+ )
+ await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
+ "status": "dispatched",
+ "hasWebSocket": True,
+ "chunks": ttsOutcome.get("chunks"),
+ "played": ttsOutcome.get("played"),
+ "timestamp": getIsoTimestamp(),
+ })
+ else:
+ logger.warning(
+ f"Greeting TTS failed for session {sessionId}: {ttsOutcome.get('error')}"
+ )
+ await _emitSessionEvent(sessionId, "ttsDeliveryStatus", {
+ "status": "failed",
+ "hasWebSocket": True,
+ "message": ttsOutcome.get("error"),
+ "timestamp": getIsoTimestamp(),
+ })
+
+ if sendToChat:
+ try:
+ await websocket.send_text(json.dumps({
+ "type": "sendChatMessage",
+ "sessionId": sessionId,
+ "text": greetingText,
+ }))
+ logger.info(f"Greeting chat dispatch queued for session {sessionId}")
+ except Exception as chatErr:
+ logger.warning(
+ f"Greeting chat dispatch failed for session {sessionId}: {chatErr}"
+ )
+
+ greetingTranscriptData = TeamsbotTranscript(
+ sessionId=sessionId,
+ speaker=self.config.botName,
+ text=greetingText,
+ timestamp=getIsoTimestamp(),
+ confidence=1.0,
+ language=greetingLang,
+ isFinal=True,
+ source="botResponse",
+ ).model_dump()
+ greetingTranscript = interface.createTranscript(greetingTranscriptData)
+
+ self._contextBuffer.append({
+ "speaker": self.config.botName,
+ "text": greetingText,
+ "timestamp": getUtcTimestamp(),
+ "source": "botResponse",
+ })
+ self._lastTranscriptSpeaker = self.config.botName
+ self._lastTranscriptText = greetingText
+ self._lastTranscriptId = greetingTranscript.get("id")
+
+ await _emitSessionEvent(sessionId, "botResponse", {
+ "id": greetingTranscript.get("id"),
+ "responseText": greetingText,
+ "responseType": TeamsbotResponseType.AUDIO.value,
+ "detectedIntent": "greeting",
+ "reasoning": "Automatic join greeting",
+ "timestamp": getIsoTimestamp(),
+ })
+ await _emitSessionEvent(sessionId, "transcript", {
+ "id": greetingTranscript.get("id"),
+ "speaker": self.config.botName,
+ "text": greetingText,
+ "confidence": 1.0,
+ "timestamp": getIsoTimestamp(),
+ "isContinuation": False,
+ "source": "botResponse",
+ "speakerResolvedFromHint": False,
+ })
+ except Exception as dispatchErr:
+ logger.warning(
+ f"Greeting dispatch failed for session {sessionId}: {dispatchErr}"
+ )
+
# =========================================================================
# Context Summarization (for long sessions)
# =========================================================================
diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py
index a606c58a..ef8789ea 100644
--- a/modules/features/trustee/accounting/accountingDataSync.py
+++ b/modules/features/trustee/accounting/accountingDataSync.py
@@ -144,6 +144,8 @@ class AccountingDataSync:
"journalLines": 0,
"contacts": 0,
"accountBalances": 0,
+ "oldestBookingDate": None,
+ "newestBookingDate": None,
"errors": [],
"startedAt": time.time(),
}
@@ -211,12 +213,14 @@ class AccountingDataSync:
)
_dumpSyncData("journalEntries", rawEntries)
_progress(60, f"Speichere {len(rawEntries)} Buchungssaetze...")
- entriesCount, linesCount = await asyncio.to_thread(
+ entriesCount, linesCount, oldestDate, newestDate = await asyncio.to_thread(
self._persistJournal, rawEntries, scope, featureInstanceId,
TrusteeDataJournalEntry, TrusteeDataJournalLine,
)
summary["journalEntries"] = entriesCount
summary["journalLines"] = linesCount
+ summary["oldestBookingDate"] = oldestDate
+ summary["newestBookingDate"] = newestDate
_progress(65, f"{entriesCount} Saetze + {linesCount} Buchungszeilen gespeichert.")
except Exception as e:
logger.error(f"Import journal entries failed: {e}", exc_info=True)
@@ -277,6 +281,11 @@ class AccountingDataSync:
"journalLines": int(summary.get("journalLines", 0)),
"contacts": int(summary.get("contacts", 0)),
"accountBalances": int(summary.get("accountBalances", 0)),
+ # Actual oldest/newest booking date observed in the
+ # imported journal entries. Lets the user verify that the
+ # full requested window was returned by the source system.
+ "oldestBookingDate": summary.get("oldestBookingDate"),
+ "newestBookingDate": summary.get("newestBookingDate"),
},
}
try:
@@ -321,6 +330,9 @@ class AccountingDataSync:
We pre-build the line rows in memory keyed by the freshly minted entryId
so a single ``execute_values`` call can persist all of them.
+
+ Returns ``(entriesCount, linesCount, oldestBookingDate, newestBookingDate)``
+ where the date strings are ISO ``YYYY-MM-DD`` (or ``None`` if no entries).
"""
import uuid as _uuid
t0 = time.time()
@@ -329,12 +341,22 @@ class AccountingDataSync:
entryRows: List[Dict[str, Any]] = []
lineRows: List[Dict[str, Any]] = []
+ oldestDate: Optional[str] = None
+ newestDate: Optional[str] = None
for raw in rawEntries:
entryId = str(_uuid.uuid4())
+ bookingDate = raw.get("bookingDate")
+ if bookingDate:
+ normalized = str(bookingDate).split("T")[0][:10]
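+                # e.g. "2026-03-31T00:00:00Z" -> "2026-03-31"; a plain
+                # "2026-03-31" passes through unchanged.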
+ if normalized:
+ if oldestDate is None or normalized < oldestDate:
+ oldestDate = normalized
+ if newestDate is None or normalized > newestDate:
+ newestDate = normalized
entryRows.append({
"id": entryId,
"externalId": raw.get("externalId"),
- "bookingDate": raw.get("bookingDate"),
+ "bookingDate": bookingDate,
"reference": raw.get("reference"),
"description": raw.get("description", ""),
"currency": raw.get("currency", "CHF"),
@@ -363,9 +385,10 @@ class AccountingDataSync:
linesCount = self._bulkCreate(modelLine, lineRows)
logger.info(
f"Persisted {entriesCount} entries + {linesCount} lines for "
- f"{featureInstanceId} in {time.time() - t0:.1f}s"
+ f"{featureInstanceId} in {time.time() - t0:.1f}s "
+ f"(window: {oldestDate or '?'} .. {newestDate or '?'})"
)
- return entriesCount, linesCount
+ return entriesCount, linesCount, oldestDate, newestDate
def _persistContacts(self, customers: list, vendors: list, scope: Dict[str, Any],
featureInstanceId: str, modelContact: Type) -> int:
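A minimal standalone sketch of the booking-date window tracking that _persistJournal now returns (the rawEntries sample is hypothetical; real entries come from the connector):

from typing import Optional

rawEntries = [
    {"bookingDate": "2026-03-31T00:00:00"},
    {"bookingDate": "2026-01-05"},
    {"bookingDate": None},  # entries without a date are skipped
]

oldestDate: Optional[str] = None
newestDate: Optional[str] = None
for raw in rawEntries:
    bookingDate = raw.get("bookingDate")
    if not bookingDate:
        continue
    # Normalize "2026-03-31T00:00:00" and "2026-03-31" alike to ISO YYYY-MM-DD;
    # lexicographic comparison on that form equals chronological comparison.
    normalized = str(bookingDate).split("T")[0][:10]
    if oldestDate is None or normalized < oldestDate:
        oldestDate = normalized
    if newestDate is None or normalized > newestDate:
        newestDate = normalized

assert (oldestDate, newestDate) == ("2026-01-05", "2026-03-31")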
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
index 79a61d77..9e372099 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
@@ -437,7 +437,10 @@ class AccountingConnectorRma(BaseAccountingConnector):
"creditAmount": credit,
"description": desc,
})
- entry["totalAmount"] += max(debit, credit)
+ # Booking total = sum of debits (== sum of credits for a balanced
+ # booking). Summing max(debit, credit) per line would double-count
+ # a balanced 2-line booking (200 instead of 100).
+ entry["totalAmount"] += debit
return list(entriesByRef.values())
except Exception as e:
@@ -494,7 +497,9 @@ class AccountingConnectorRma(BaseAccountingConnector):
"creditAmount": credit,
"description": t.get("memo", ""),
})
- totalAmt += max(debit, credit)
+ # Sum debits only -- equals sum of credits for a balanced
+ # booking. max(debit, credit) per line would double-count.
+ totalAmt += debit
entries.append({
"externalId": str(batch.get("id", ref)),
diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py
index 265227a0..a87f6f55 100644
--- a/modules/features/trustee/datamodelFeatureTrustee.py
+++ b/modules/features/trustee/datamodelFeatureTrustee.py
@@ -3,7 +3,7 @@
"""Trustee models: TrusteeOrganisation, TrusteeRole, TrusteeAccess, TrusteeContract, TrusteeDocument, TrusteePosition."""
from enum import Enum
-from typing import Optional, Dict
+from typing import Optional, Dict, Any
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
@@ -832,7 +832,7 @@ class TrusteeAccountingConfig(PowerOnModel):
lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"})
lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"})
lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"})
- lastSyncCounts: Optional[Dict[str, int]] = Field(default=None, description="Per-entity counts of the last import (accounts, journalEntries, journalLines, contacts, accountBalances)", json_schema_extra={"label": "Letzte Import-Zaehler"})
+ lastSyncCounts: Optional[Dict[str, Any]] = Field(default=None, description="Last import summary: per-entity counts (accounts, journalEntries, journalLines, contacts, accountBalances) plus oldestBookingDate / newestBookingDate (ISO YYYY-MM-DD) for completeness verification", json_schema_extra={"label": "Letzte Import-Zaehler"})
cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"})
chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"})
mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}})
diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py
index d040c37d..fbdd0966 100644
--- a/modules/features/trustee/routeFeatureTrustee.py
+++ b/modules/features/trustee/routeFeatureTrustee.py
@@ -1864,13 +1864,87 @@ def clear_ai_data_cache(
instanceId: str = Path(..., description="Feature Instance ID"),
context: RequestContext = Depends(getRequestContext),
) -> Dict[str, Any]:
- """Clear the AI feature-data query cache for this instance so the next AI query reads fresh DB data."""
+ """Clear ONLY the AI feature-data query result cache (in-memory, ~5 min TTL).
+
+ Important: this does NOT touch the synchronised ``TrusteeData*`` tables.
+ The synced rows (chart of accounts, journal entries/lines, contacts, balances)
+ stay exactly as imported. To wipe those rows, use POST .../wipe-imported-data.
+ """
_validateInstanceAccess(instanceId, context)
from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
removed = clearFeatureQueryCache(instanceId)
return {"cleared": removed, "featureInstanceId": instanceId}
+@router.post("/{instanceId}/accounting/wipe-imported-data")
+@limiter.limit("3/minute")
+def wipe_imported_accounting_data(
+ request: Request,
+ instanceId: str = Path(..., description="Feature Instance ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> Dict[str, Any]:
+ """Delete every ``TrusteeData*`` row imported for this feature instance.
+
+ Use when the source system was changed, test data needs to be cleared, or
+ the user suspects stale rows from earlier connector versions. Also resets
+ the ``lastSync*`` markers on the active config so the UI no longer reports
+ a stale "letzter Import" status. The connector configuration / credentials
+ remain untouched -- only synchronised payload data is removed.
+ """
+ mandateId = _validateInstanceAccess(instanceId, context)
+ interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
+ from .datamodelFeatureTrustee import (
+ TrusteeDataAccount, TrusteeDataJournalEntry, TrusteeDataJournalLine,
+ TrusteeDataContact, TrusteeDataAccountBalance, TrusteeAccountingConfig,
+ )
+ from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
+
+ removed: Dict[str, int] = {}
+ for tableName, model in [
+ ("accounts", TrusteeDataAccount),
+ ("journalEntries", TrusteeDataJournalEntry),
+ ("journalLines", TrusteeDataJournalLine),
+ ("contacts", TrusteeDataContact),
+ ("accountBalances", TrusteeDataAccountBalance),
+ ]:
+ try:
+ removed[tableName] = int(interface.db.recordDeleteWhere(model, {"featureInstanceId": instanceId}) or 0)
+ except Exception as ex:
+ logger.warning("wipeImportedData: failed for %s: %s", tableName, ex)
+ removed[tableName] = 0
+
+ cfgRecords = interface.db.getRecordset(
+ TrusteeAccountingConfig,
+ recordFilter={"featureInstanceId": instanceId, "isActive": True},
+ )
+ if cfgRecords:
+ cfgId = cfgRecords[0].get("id")
+ if cfgId:
+ try:
+ interface.db.recordModify(TrusteeAccountingConfig, cfgId, {
+ "lastSyncAt": None,
+ "lastSyncStatus": None,
+ "lastSyncErrorMessage": None,
+ "lastSyncDateFrom": None,
+ "lastSyncDateTo": None,
+ "lastSyncCounts": None,
+ })
+ except Exception as ex:
+ logger.warning("wipeImportedData: failed to reset lastSync* on cfg %s: %s", cfgId, ex)
+
+ cacheCleared = clearFeatureQueryCache(instanceId)
+ logger.info(
+ "wipeImportedData instance=%s removed=%s cacheCleared=%s",
+ instanceId, removed, cacheCleared,
+ )
+ return {
+ "removed": removed,
+ "totalRemoved": sum(removed.values()),
+ "cacheCleared": cacheCleared,
+ "featureInstanceId": instanceId,
+ }
+
+
# ===== Data Export =====
@router.get("/{instanceId}/accounting/export-data")
diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py
index e9d78e55..3e8bf4ea 100644
--- a/modules/interfaces/interfaceBootstrap.py
+++ b/modules/interfaces/interfaceBootstrap.py
@@ -210,7 +210,18 @@ def _buildSystemTemplates():
"nodes": [
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Täglicher Check", "parameters": {}},
{"id": "n2", "type": "email.checkEmail", "x": 300, "y": 200, "title": "Mailbox prüfen", "parameters": {}},
- {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro E-Mail", "parameters": {}},
+ {
+ "id": "n3",
+ "type": "flow.loop",
+ "x": 550,
+ "y": 200,
+ "title": "Pro E-Mail",
+ "parameters": {
+ "items": {"type": "ref", "nodeId": "n2", "path": ["emails"]},
+ "level": "auto",
+ "concurrency": 1,
+ },
+ },
{"id": "n4", "type": "ai.prompt", "x": 800, "y": 200, "title": "Analyse: Antwort nötig?", "parameters": {}},
{"id": "n5", "type": "flow.ifElse", "x": 1050, "y": 200, "title": "Antwort nötig?", "parameters": {}},
{"id": "n6", "type": "ai.prompt", "x": 1300, "y": 100, "title": "Kontext abrufen & Antwort formulieren", "parameters": {}},
@@ -239,7 +250,18 @@ def _buildSystemTemplates():
"nodes": [
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Geplanter Import", "parameters": {}},
{"id": "n2", "type": "sharepoint.listFiles", "x": 300, "y": 200, "title": "SharePoint Ordner lesen", "parameters": {}},
- {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro Dokument", "parameters": {}},
+ {
+ "id": "n3",
+ "type": "flow.loop",
+ "x": 550,
+ "y": 200,
+ "title": "Pro Dokument",
+ "parameters": {
+ "items": {"type": "ref", "nodeId": "n2", "path": ["files"]},
+ "level": "auto",
+ "concurrency": 1,
+ },
+ },
{"id": "n4", "type": "sharepoint.readFile", "x": 800, "y": 200, "title": "PDF-Inhalt lesen", "parameters": {}},
{"id": "n5", "type": "ai.prompt", "x": 1050, "y": 200, "title": "Typ klassifizieren (Rechnung, Beleg, Bankauszug, Vertrag, etc.)", "parameters": {}},
{"id": "n6", "type": "trustee.extractFromFiles", "x": 1300, "y": 200, "title": "Dokument extrahieren", "parameters": {}},
diff --git a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
index 0026fa23..e0b5cb43 100644
--- a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
+++ b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
@@ -3,7 +3,7 @@
"""ActionToolAdapter: wraps existing workflow actions (dynamicMode=True) as agent tools."""
import logging
-from typing import Dict, Any, List, Optional
+from typing import Dict, Any, List
from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
ToolDefinition, ToolResult
@@ -70,22 +70,28 @@ def _buildToolDefinition(compoundName: str, actionDef, actionInfo: Dict[str, Any
def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
- """Convert workflow action parameter schema to JSON Schema for tool definitions."""
- properties = {}
- required = []
+ """Convert workflow action parameter schema to JSON Schema for tool definitions.
+
+ Schicht-3 Adapter (typed): looks up each parameter's `type` against the
+ PORT_TYPE_CATALOG and produces a strict JSON Schema fragment.
+ Falls back to a generic string schema only when the type is fully unknown
+ (which should never happen after Phase 2's signature validator).
+ """
+ properties: Dict[str, Any] = {}
+ required: List[str] = []
for paramName, paramInfo in actionParams.items():
- paramType = paramInfo.get("type", "str") if isinstance(paramInfo, dict) else "str"
- paramDesc = paramInfo.get("description", "") if isinstance(paramInfo, dict) else ""
- paramRequired = paramInfo.get("required", False) if isinstance(paramInfo, dict) else False
+ if not isinstance(paramInfo, dict):
+ properties[paramName] = {"type": "string", "description": ""}
+ continue
- jsonType = _pythonTypeToJsonType(paramType)
- prop: Dict[str, Any] = {
- "type": jsonType,
- "description": paramDesc,
- }
- if jsonType == "array":
- prop["items"] = _pythonTypeToArrayItems(paramType) or {"type": "string"}
+ paramType = paramInfo.get("type", "str")
+ paramDesc = paramInfo.get("description", "") or ""
+ paramRequired = bool(paramInfo.get("required", False))
+
+ prop = _catalogTypeToJsonSchema(paramType)
+ if paramDesc:
+ prop["description"] = paramDesc
properties[paramName] = prop
if paramRequired:
@@ -94,41 +100,90 @@ def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
return {
"type": "object",
"properties": properties,
- "required": required
+ "required": required,
}
-_TYPE_MAPPING = {
+# Primitive Python type strings → JSON Schema scalar types.
+_PRIMITIVE_JSON_TYPE: Dict[str, str] = {
"str": "string",
"int": "integer",
"float": "number",
"bool": "boolean",
- "list": "array",
- "dict": "object",
- "List[str]": "array",
- "List[int]": "array",
- "List[dict]": "array",
- "List[float]": "array",
- "Dict[str, Any]": "object",
-}
-
-_ARRAY_ITEMS_MAPPING = {
- "list": {"type": "string"},
- "List[str]": {"type": "string"},
- "List[int]": {"type": "integer"},
- "List[float]": {"type": "number"},
- "List[dict]": {"type": "object"},
}
-def _pythonTypeToJsonType(pythonType: str) -> str:
- """Map Python type strings to JSON Schema types."""
- return _TYPE_MAPPING.get(pythonType, "string")
+def _catalogTypeToJsonSchema(typeStr: str, _depth: int = 0) -> Dict[str, Any]:
+ """Recursively convert a PORT_TYPE_CATALOG type reference into a JSON Schema fragment.
+ Supports:
+ - Primitives (str/int/bool/float/Any)
+ - Catalog object schemas (recursively expanded with properties/required)
+ - List[X] (array with typed items)
+ - Dict[K, V] (object with typed additionalProperties)
-def _pythonTypeToArrayItems(pythonType: str) -> Optional[Dict[str, Any]]:
- """Return the JSON Schema `items` descriptor for array types, or None."""
- return _ARRAY_ITEMS_MAPPING.get(pythonType)
+ `_depth` guards against pathological recursion in case of a cyclic catalog.
+ """
+ from modules.features.graphicalEditor.portTypes import (
+ PORT_TYPE_CATALOG,
+ PRIMITIVE_TYPES,
+ )
+
+ if _depth > 6:
+ return {"type": "object", "description": "(max-depth)"}
+
+ if not typeStr or not isinstance(typeStr, str):
+ return {"type": "string"}
+
+ typeStr = typeStr.strip()
+
+ if typeStr in _PRIMITIVE_JSON_TYPE:
+ return {"type": _PRIMITIVE_JSON_TYPE[typeStr]}
+ if typeStr == "Any":
+ return {}
+
+ if typeStr.startswith("List[") and typeStr.endswith("]"):
+ inner = typeStr[5:-1].strip()
+ return {"type": "array", "items": _catalogTypeToJsonSchema(inner, _depth + 1)}
+
+ if typeStr.startswith("Dict[") and typeStr.endswith("]"):
+ inner = typeStr[5:-1].strip()
+ valueType = "Any"
+ parts = [p.strip() for p in inner.split(",", 1)]
+ if len(parts) == 2:
+ valueType = parts[1]
+ return {
+ "type": "object",
+ "additionalProperties": _catalogTypeToJsonSchema(valueType, _depth + 1),
+ }
+
+ schema = PORT_TYPE_CATALOG.get(typeStr)
+ if schema is not None:
+ props: Dict[str, Any] = {}
+ required: List[str] = []
+ for f in schema.fields:
+ fragment = _catalogTypeToJsonSchema(f.type, _depth + 1)
+ if f.description:
+ fragment["description"] = f.description
+ if f.enumValues:
+ fragment["enum"] = list(f.enumValues)
+ props[f.name] = fragment
+ if f.required:
+ required.append(f.name)
+ out: Dict[str, Any] = {
+ "type": "object",
+ "properties": props,
+ "description": f"PORT_TYPE_CATALOG schema '{schema.name}'",
+ }
+ if required:
+ out["required"] = required
+ return out
+
+    # Lowercase 'list' / 'dict' aliases (legacy, should be eradicated by Phase 2 validator)
+    if typeStr in {"list", "dict"}:
+        return {"type": "array" if typeStr == "list" else "object"}
+
+ return {"type": "string", "description": f"unknown type '{typeStr}' (defaulted to string)"}
def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
diff --git a/modules/serviceCenter/services/serviceAgent/workflowTools.py b/modules/serviceCenter/services/serviceAgent/workflowTools.py
index 34ca5d46..7f01ee79 100644
--- a/modules/serviceCenter/services/serviceAgent/workflowTools.py
+++ b/modules/serviceCenter/services/serviceAgent/workflowTools.py
@@ -291,6 +291,85 @@ async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult:
return _err(name, str(e))
+async def _list_upstream_paths(params: Dict[str, Any], context: Any) -> ToolResult:
+ """List pickable upstream DataRef paths for a node (saved workflow graph)."""
+ name = "listUpstreamPaths"
+ try:
+ workflow_id, instance_id = _resolveIds(params, context)
+ node_id = params.get("nodeId")
+ if not workflow_id or not instance_id or not node_id:
+ return _err(name, "workflowId, instanceId, and nodeId required")
+
+ iface = _getInterface(context, instance_id)
+ wf = iface.getWorkflow(workflow_id)
+ if not wf:
+ return _err(name, f"Workflow {workflow_id} not found")
+
+ graph = wf.get("graph", {}) or {}
+ from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
+
+ paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id))
+ return _ok(name, {"paths": paths})
+ except Exception as e:
+ logger.exception("listUpstreamPaths failed: %s", e)
+ return _err(name, str(e))
+
+
+async def _bind_node_parameter(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Bind a node parameter to an upstream field via an explicit DataRef."""
+ name = "bindNodeParameter"
+ try:
+ workflow_id, instance_id = _resolveIds(params, context)
+ node_id = params.get("nodeId")
+ param_name = params.get("parameterName")
+ producer_node_id = params.get("producerNodeId")
+ path = params.get("path")
+ if not workflow_id or not instance_id or not node_id or not param_name:
+ return _err(name, "workflowId, instanceId, nodeId, and parameterName required")
+ if not producer_node_id:
+ return _err(name, "producerNodeId required")
+
+ iface = _getInterface(context, instance_id)
+ wf = iface.getWorkflow(workflow_id)
+ if not wf:
+ return _err(name, f"Workflow {workflow_id} not found")
+
+ graph = dict(wf.get("graph", {}) or {})
+ nodes = list(graph.get("nodes", []) or [])
+ found = False
+ ref: Dict[str, Any] = {
+ "type": "ref",
+ "nodeId": str(producer_node_id),
+ "path": list(path) if isinstance(path, (list, tuple)) else [],
+ }
+ exp_type = params.get("expectedType")
+ if exp_type:
+ ref["expectedType"] = str(exp_type)
+
+ for n in nodes:
+ if n.get("id") == node_id:
+ node_params = dict(n.get("parameters", {}) or {})
+ node_params[param_name] = ref
+ n["parameters"] = node_params
+ found = True
+ break
+
+ if not found:
+ return _err(name, f"Node {node_id} not found in graph")
+
+ graph["nodes"] = nodes
+ iface.updateWorkflow(workflow_id, {"graph": graph})
+ return _ok(name, {
+ "nodeId": node_id,
+ "parameter": param_name,
+ "dataRef": ref,
+ "message": f"Parameter '{param_name}' bound to upstream {producer_node_id}",
+ })
+ except Exception as e:
+ logger.exception("bindNodeParameter failed: %s", e)
+ return _err(name, str(e))
+
+
def _coerceLabel(rawLabel: Any, fallback: str) -> str:
"""Normalize a node label which may be a string, dict {locale: str}, or other."""
if isinstance(rawLabel, str):
@@ -950,6 +1029,45 @@ def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
},
"toolSet": TOOLBOX_ID,
},
+ {
+ "name": "listUpstreamPaths",
+ "handler": _list_upstream_paths,
+ "description": (
+ "List pickable upstream paths for binding DataRefs on a node. "
+ "Call after readWorkflowGraph; use with bindNodeParameter instead of relying on implicit wiring."
+ ),
+ "parameters": {
+ "type": "object",
+ "properties": {
+ **_idFields,
+ "nodeId": {"type": "string", "description": "Target node id (the node whose parameters you will bind)"},
+ },
+ "required": ["nodeId"],
+ },
+ "readOnly": True,
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "bindNodeParameter",
+ "handler": _bind_node_parameter,
+ "description": (
+ "Bind a parameter to an upstream output using an explicit DataRef "
+ "(producerNodeId + path). Prefer listUpstreamPaths to discover valid paths."
+ ),
+ "parameters": {
+ "type": "object",
+ "properties": {
+ **_idFields,
+ "nodeId": {"type": "string"},
+ "parameterName": {"type": "string"},
+ "producerNodeId": {"type": "string", "description": "Upstream node id (port 0 producer)"},
+ "path": {"type": "array", "items": {}, "description": "JSON path segments, e.g. [\"documents\"] or [\"id\"]"},
+ "expectedType": {"type": "string", "description": "Optional type hint stored on the ref"},
+ },
+ "required": ["nodeId", "parameterName", "producerNodeId"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
{
"name": "listAvailableNodeTypes",
"handler": _listAvailableNodeTypes,
diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
index cd7de3e9..6428bed3 100644
--- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py
+++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
@@ -520,7 +520,28 @@ STOP-ERKENNUNG:
Wenn jemand dich bittet aufzuhoeren, still zu sein, zu stoppen, oder nicht mehr zu reden
(in JEDER Sprache, z.B. "{botFirstName} stop", "{botFirstName} sei still", "{botFirstName} halt", "{botFirstName} be quiet",
"{botFirstName} shut up", "{botFirstName} arrete", etc.), dann setze detectedIntent auf "stop" und
-shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt."""
+shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt.
+
+AGENT-ESKALATION (needsAgent):
+Du bist ein SCHNELLER Reflex-Pfad. Fuer komplexe Aufgaben gibt es einen vollwertigen Agent
+mit Web-Recherche, E-Mail-Versand, Dokumenten-Erzeugung und Datenquellen-Zugriff
+(SharePoint, Outlook, GoogleDrive etc. via User-Connections).
+
+Setze "needsAgent": true und "agentReason": ""
+WENN die Aufgabe eines oder mehrere dieser Merkmale hat:
+- Recherche im Internet noetig (z.B. "recherchier was im Internet ueber XY", "schau mal nach", "google das")
+- E-Mail an Teilnehmer/Kontakte versenden
+- Dokument (PDF, Word, Excel) generieren oder im SharePoint/Drive ablegen
+- Mehrere Schritte oder Tool-Aufrufe noetig (Zusammenfassung + Versand, Recherche + Empfehlung etc.)
+- Daten aus externen Quellen abrufen (Outlook-Kontakte, SharePoint-Dateien, Kalender etc.)
+
+Wenn needsAgent=true:
+- Setze shouldRespond=false (der Agent uebernimmt; du sprichst NICHT eigenstaendig).
+- responseText kann eine kurze Bestaetigung sein, wird aber nicht ausgesprochen.
+- agentReason ist die Aufgabenbeschreibung fuer den Agent (klar, in einer Zeile).
+
+Wenn die Aufgabe einfach ist (Definition, Wissensfrage aus eigenem Wissen, kurze Meinung,
+Wiedergabe von vorhandenem Kontext), erledige sie SELBST mit shouldRespond=true und needsAgent=false."""
# Append user-configured instructions if provided
if userSystemPrompt and userSystemPrompt.strip():
@@ -546,7 +567,9 @@ WICHTIG: Antworte IMMER als valides JSON in exakt diesem Format:
"responseChannels": optional - ["voice"], ["chat"] oder ["voice","chat"] je nach User-Anfrage,
"reasoning": "Kurze Begruendung deiner Entscheidung",
"detectedIntent": "addressed" | "question" | "proactive" | "stop" | "none",
- "commands": [] oder null
+ "commands": [] oder null,
+ "needsAgent": false (true nur bei komplexen Aufgaben gemaess Eskalations-Regeln),
+ "agentReason": null (oder kurze Aufgabenbeschreibung wenn needsAgent=true)
}}
detectedIntent-Werte:
diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py
index 92615062..b5b5b754 100644
--- a/modules/workflows/automation2/executionEngine.py
+++ b/modules/workflows/automation2/executionEngine.py
@@ -77,7 +77,11 @@ def _outputSchemaForNode(nodeType: str) -> Optional[str]:
if isinstance(ports, dict):
p0 = ports.get(0) or ports.get("0")
if isinstance(p0, dict):
- return p0.get("schema")
+ spec = p0.get("schema")
+ if isinstance(spec, dict) and spec.get("kind") == "fromGraph":
+ return "FormPayload"
+ if isinstance(spec, str):
+ return spec
return None
@@ -329,6 +333,15 @@ async def executeGraph(
)
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
+ from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs
+ from modules.workflows.automation2.featureInstanceRefMigration import (
+ materializeFeatureInstanceRefs,
+ )
+
+ # Phase-5 Schicht-4: typed-ref envelopes are materialized FIRST so the
+ # subsequent connection-ref pass and validation see the canonical shape.
+ graph = materializeFeatureInstanceRefs(graph)
+ graph = materializeConnectionRefs(graph)
nodeTypeIds = _getNodeTypeIds(services)
logger.debug("executeGraph nodeTypeIds (%d): %s", len(nodeTypeIds), sorted(nodeTypeIds))
errors = validateGraph(graph, nodeTypeIds)
diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py
index 31cfc39c..d9fc99a7 100644
--- a/modules/workflows/automation2/executors/actionNodeExecutor.py
+++ b/modules/workflows/automation2/executors/actionNodeExecutor.py
@@ -1,19 +1,17 @@
# Copyright (c) 2025 Patrick Motsch
# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions.
#
-# Typed Port System: no heuristic merging. Uses INPUT_EXTRACTORS for wire-handover,
-# DataRef for explicit parameter mapping, and _normalizeToSchema for output normalization.
+# Typed Port System: explicit DataRefs / static parameters only (no runtime wire-handover).
+# ``materializeConnectionRefs`` (see pickNotPushMigration) may still rewrite empty connectionReference at run start.
import json
import logging
import re
-from typing import Dict, Any, Optional
+from typing import Any, Dict, Optional
from modules.features.graphicalEditor.portTypes import (
- INPUT_EXTRACTORS,
- _normalizeToSchema,
_normalizeError,
- _unwrapTransit,
+ _normalizeToSchema,
)
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError
@@ -119,6 +117,63 @@ def _buildSearchQuery(
return " ".join(parts) if parts else "*"
+def _buildConnectionRefDict(connRef: str, chatService, services) -> Optional[Dict[str, Any]]:
+ """
+ Build {id, authority, label} for node outputs (no secrets).
+    connRef may be a UUID or a logical connection:authority:user reference.
+ """
+ if not connRef or not isinstance(connRef, str):
+ return None
+ original_ref = connRef.strip()
+ ref = original_ref
+ if _isUserConnectionId(ref):
+ resolved = _resolveConnectionIdToReference(chatService, ref, services)
+ if resolved:
+ ref = resolved
+ if not ref.startswith("connection:"):
+ return None
+ parts = ref.split(":", 2)
+ authority = parts[1] if len(parts) > 1 else ""
+ user = parts[2] if len(parts) > 2 else ""
+ label = ref
+ conn_id = ""
+ if chatService:
+ try:
+ for c in chatService.getUserConnections() or []:
+ conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
+ aid = conn.get("authority", "")
+ if hasattr(aid, "value"):
+ aid = aid.value
+ un = conn.get("externalUsername", "") or conn.get("externalId", "") or ""
+ logical = f"connection:{aid}:{un}"
+ if logical == ref or str(conn.get("id")) == original_ref:
+ conn_id = str(conn.get("id", "") or "")
+ break
+ except Exception as e:
+ logger.debug("_buildConnectionRefDict: getUserConnections: %s", e)
+ return {"id": conn_id, "authority": authority, "label": label or f"{authority}:{user}"}
+
+
+def _attachConnectionProvenance(
+ out: Dict[str, Any],
+ resolvedParams: Dict[str, Any],
+ outputSchema: str,
+ chatService,
+ services,
+) -> None:
+ """Mutates out to include connection provenance for typed list/draft outputs."""
+ if out.get("connection"):
+ return
+ cref = resolvedParams.get("connectionReference")
+ if not cref:
+ return
+ if outputSchema not in ("FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"):
+ return
+ payload = _buildConnectionRefDict(str(cref), chatService, services)
+ if payload:
+ out["connection"] = payload
+
+
def _resolveConnectionParam(params: Dict, chatService, services) -> None:
"""Resolve connectionReference if it looks like a UUID (UserConnection.id)."""
connRef = params.get("connectionReference")
@@ -157,45 +212,6 @@ def _applyEmailSearchQuery(params: Dict) -> None:
params.pop(k, None)
-def _wireHandover(nodeDef: Dict, inputSources: Dict, nodeOutputs: Dict, params: Dict) -> None:
- """Apply wire-handover: extract fields from upstream using INPUT_EXTRACTORS."""
- if 0 not in inputSources:
- logger.debug("_wireHandover: no port 0 in inputSources=%s", inputSources)
- return
- srcId, _ = inputSources[0]
- upstream = nodeOutputs.get(srcId)
- if not upstream or not isinstance(upstream, dict):
- logger.debug("_wireHandover: upstream for %s is missing or not dict: %s", srcId, type(upstream))
- return
-
- data = _unwrapTransit(upstream)
- if not isinstance(data, dict):
- logger.debug("_wireHandover: unwrapped data is not dict: %s", type(data))
- return
-
- inputPorts = nodeDef.get("inputPorts", {})
- port0 = inputPorts.get(0, {})
- accepts = port0.get("accepts", [])
- logger.debug("_wireHandover: srcId=%s accepts=%s upstream_keys=%s params_keys_before=%s", srcId, accepts, list(data.keys()), list(params.keys()))
-
- for schemaName in accepts:
- if schemaName == "Transit":
- continue
- extractor = INPUT_EXTRACTORS.get(schemaName)
- if extractor:
- extracted = extractor(data)
- logger.debug("_wireHandover: extractor %s returned keys=%s", schemaName, list(extracted.keys()) if extracted else None)
- if extracted:
- for k, v in extracted.items():
- existing = params.get(k)
- if not existing:
- params[k] = v
- logger.debug("_wireHandover: set %s (was empty/missing) type=%s len=%s", k, type(v).__name__, len(v) if isinstance(v, (list, str, dict)) else "n/a")
- else:
- logger.debug("_wireHandover: skip %s (already has value, type=%s)", k, type(existing).__name__)
- return
-
-
def _getOutputSchemaName(nodeDef: Dict) -> str:
"""Get the output schema name from the node definition."""
outputPorts = nodeDef.get("outputPorts", {})
@@ -238,22 +254,17 @@ class ActionNodeExecutor:
resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {}))
logger.debug("ActionNodeExecutor node %s resolved params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__)
- # 2. Wire-handover via extractors (fills missing params from upstream)
- inputSources = context.get("inputSources", {}).get(nodeId, {})
- _wireHandover(nodeDef, inputSources, context.get("nodeOutputs", {}), resolvedParams)
- logger.debug("ActionNodeExecutor node %s after wireHandover: params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__)
-
- # 3. Apply defaults from parameter definitions
+ # 2. Apply defaults from parameter definitions
for pDef in nodeDef.get("parameters", []):
pName = pDef.get("name")
if pName and pName not in resolvedParams and "default" in pDef:
resolvedParams[pName] = pDef["default"]
- # 4. Resolve connectionReference
+ # 3. Resolve connectionReference
chatService = getattr(self.services, "chat", None)
_resolveConnectionParam(resolvedParams, chatService, self.services)
- # 5. Node-type-specific param transformations
+ # 4. Node-type-specific param transformations
if nodeType == "email.checkEmail":
_applyEmailCheckFilter(resolvedParams)
elif nodeType == "email.searchEmail":
@@ -262,7 +273,7 @@ class ActionNodeExecutor:
from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries
merge_clickup_task_update_entries(resolvedParams)
- # 6. email.checkEmail pause for email wait
+ # 5. email.checkEmail pause for email wait
if nodeType == "email.checkEmail":
runId = context.get("_runId")
workflowId = context.get("workflowId")
@@ -277,12 +288,12 @@ class ActionNodeExecutor:
}
raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig)
- # 7. AI nodes: normalize legacy "prompt" -> "aiPrompt"
+ # 6. AI nodes: normalize legacy "prompt" -> "aiPrompt"
if nodeType == "ai.prompt":
if "aiPrompt" not in resolvedParams and "prompt" in resolvedParams:
resolvedParams["aiPrompt"] = resolvedParams.pop("prompt")
- # 8. Build context for email.draftEmail from subject + body
+ # 7. Build context for email.draftEmail from subject + body
if nodeType == "email.draftEmail":
subject = resolvedParams.get("subject", "")
body = resolvedParams.get("body", "")
@@ -296,7 +307,7 @@ class ActionNodeExecutor:
resolvedParams.pop("subject", None)
resolvedParams.pop("body", None)
- # 9. Execute action
+ # 8. Execute action
logger.info("ActionNodeExecutor node %s calling %s.%s with %d params", nodeId, methodName, actionName, len(resolvedParams))
try:
executor = ActionExecutor(self.services)
@@ -307,7 +318,7 @@ class ActionNodeExecutor:
logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e)
return _normalizeError(e, outputSchema)
- # 10. Persist generated documents as files and build JSON-safe output
+ # 9. Persist generated documents as files and build JSON-safe output
docsList = []
for d in (result.documents or []):
dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d
@@ -360,7 +371,6 @@ class ActionNodeExecutor:
"success": result.success,
"error": result.error,
"documents": docsList,
- "documentList": docsList,
"data": dataField,
}
@@ -396,6 +406,8 @@ class ActionNodeExecutor:
"mode": data_dict.get("mode", resolvedParams.get("mode", "summarize")),
"count": int(data_dict.get("count", 0)),
}
+ _attachConnectionProvenance(cr_out, resolvedParams, outputSchema, chatService, self.services)
return _normalizeToSchema(cr_out, outputSchema)
+ _attachConnectionProvenance(out, resolvedParams, outputSchema, chatService, self.services)
return _normalizeToSchema(out, outputSchema)
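With _attachConnectionProvenance in place, typed list/draft outputs carry a secret-free connection block that downstream nodes (and materializeConnectionRefs, below) can pick from. Illustrative shape:

# Illustrative node output after provenance attachment (no tokens or secrets):
out = {
    "success": True,
    "files": [],  # schema-specific payload, elided here
    "connection": {
        "id": "9f8e7d6c-...",  # UserConnection.id when resolvable, else ""
        "authority": "sharepoint",
        "label": "connection:sharepoint:alice@example.com",
    },
}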
diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py
index 26334dd0..5a33f9e2 100644
--- a/modules/workflows/automation2/executors/dataExecutor.py
+++ b/modules/workflows/automation2/executors/dataExecutor.py
@@ -1,5 +1,5 @@
# Copyright (c) 2025 Patrick Motsch
-# Data manipulation node executor: data.aggregate, data.transform, data.filter.
+# Data manipulation node executor: data.aggregate, data.filter, data.consolidate.
import logging
from typing import Any, Dict
@@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
class DataExecutor:
- """Execute data.aggregate, data.transform, data.filter nodes."""
+ """Execute data.aggregate, data.filter, data.consolidate nodes."""
async def execute(
self,
@@ -26,8 +26,6 @@ class DataExecutor:
if nodeType == "data.aggregate":
return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context)
- if nodeType == "data.transform":
- return await self._transform(node, nodeOutputs, nodeId, inputSources)
if nodeType == "data.filter":
return await self._filter(node, nodeOutputs, nodeId, inputSources)
if nodeType == "data.consolidate":
@@ -70,41 +68,6 @@ class DataExecutor:
return {"items": items, "count": len(items), "_success": True}
- async def _transform(
- self,
- node: Dict,
- nodeOutputs: Dict,
- nodeId: str,
- inputSources: Dict,
- ) -> Any:
- """Apply mappings to restructure data."""
- from modules.workflows.automation2.graphUtils import resolveParameterReferences
-
- inp = self._getInput(inputSources, nodeOutputs)
- data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
- mappings = (node.get("parameters") or {}).get("mappings", [])
-
- result = {}
- for mapping in mappings:
- if not isinstance(mapping, dict):
- continue
- outputField = mapping.get("outputField")
- if not outputField:
- continue
- source = mapping.get("source")
- if source and isinstance(source, dict) and source.get("type") == "ref":
- resolved = resolveParameterReferences(source, nodeOutputs)
- result[outputField] = resolved
- elif source and isinstance(source, dict) and source.get("type") == "value":
- result[outputField] = source.get("value")
- elif isinstance(data, dict) and mapping.get("sourceField"):
- result[outputField] = data.get(mapping["sourceField"])
- else:
- result[outputField] = source
-
- result["_success"] = True
- return result
-
async def _filter(
self,
node: Dict,
diff --git a/modules/workflows/automation2/featureInstanceRefMigration.py b/modules/workflows/automation2/featureInstanceRefMigration.py
new file mode 100644
index 00000000..b4fba529
--- /dev/null
+++ b/modules/workflows/automation2/featureInstanceRefMigration.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Phase-5 Schicht-4 migration: convert raw ``featureInstanceId: ""`` workflow
+parameters into typed ``FeatureInstanceRef`` envelopes on disk.
+
+Why
+---
+The Typed Action Architecture (see
+``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``) declares
+``featureInstanceId`` as ``FeatureInstanceRef`` (a catalog-typed reference with
+a ``featureCode`` discriminator). Older workflows persist this parameter as a
+plain UUID string, which carries no type information and forces every action /
+adapter to re-derive the feature code from the node type.
+
+What this module does
+---------------------
+``materializeFeatureInstanceRefs(graph)`` walks every node, and whenever a
+node parameter named ``featureInstanceId`` is a non-empty string (raw UUID),
+it rewrites the value to a typed envelope::
+
+ {"$type": "FeatureInstanceRef",
+ "id": "",
+ "featureCode": ""}
+
+The runtime resolver (``graphUtils._unwrapTypedRef``) automatically unwraps
+that envelope back to the canonical primitive (``id``) when feeding action
+implementations, so legacy action code keeps working unchanged.
+
+Idempotent
+----------
+Already-migrated values (already-envelope dicts, empty strings, ``None``) are
+left untouched. Running the migration twice is a no-op.
+
+Out of scope
+------------
+The runtime helper ``pickNotPushMigration.materializeConnectionRefs`` solves a
+related but different problem (resolving empty ``connectionReference`` to
+upstream DataRefs at run-start). Both helpers compose: the typical
+``executeGraph`` pipeline is
+
+ raw graph
+ -> materializeFeatureInstanceRefs (this module, on save / on load)
+ -> materializeConnectionRefs (pickNotPushMigration, at run-start)
+ -> ActionNodeExecutor / ActionExecutor
+"""
+from __future__ import annotations
+
+import copy
+import logging
+from typing import Any, Dict, Optional
+
+logger = logging.getLogger(__name__)
+
+
+# Single source of truth for node-type → feature code mapping. Keep in sync
+# with the method registry; values must be the same string the FeatureInstance
+# row uses for its ``featureCode`` column.
+_NODE_TYPE_PREFIX_TO_FEATURE_CODE: Dict[str, str] = {
+ "trustee": "trustee",
+ "redmine": "redmine",
+ "clickup": "clickup",
+ "sharepoint": "sharepoint",
+ "outlook": "outlook",
+ "email": "outlook",
+ "teamsbot": "teamsbot",
+ "ai": "ai",
+}
+
+
+def _deriveFeatureCode(nodeType: str) -> Optional[str]:
+ """Best-effort feature-code derivation from a node type id.
+
+ Returns ``None`` if the prefix is not in the registry — the migration then
+ omits ``featureCode`` from the envelope rather than guessing wrongly.
+ """
+ if not nodeType or not isinstance(nodeType, str):
+ return None
+ prefix = nodeType.split(".", 1)[0].strip().lower()
+ return _NODE_TYPE_PREFIX_TO_FEATURE_CODE.get(prefix)
+
+
+def _isAlreadyTypedEnvelope(value: Any) -> bool:
+ return (
+ isinstance(value, dict)
+ and value.get("$type") == "FeatureInstanceRef"
+ and isinstance(value.get("id"), str)
+ )
+
+
+def _isMigratableUuidValue(value: Any) -> bool:
+ """A bare non-empty string is treated as a UUID candidate worth migrating.
+
+ We deliberately do NOT enforce a strict UUID regex — historically
+ workflows have been seen with non-UUID instance ids (e.g. demo seeds).
+ The migration converts whatever string is there; downstream code already
+ treats the value as opaque.
+ """
+ return isinstance(value, str) and value.strip() != ""
+
+
+def _buildEnvelope(uuidValue: str, nodeType: str) -> Dict[str, Any]:
+ envelope: Dict[str, Any] = {
+ "$type": "FeatureInstanceRef",
+ "id": uuidValue.strip(),
+ }
+ code = _deriveFeatureCode(nodeType)
+ if code:
+ envelope["featureCode"] = code
+ return envelope
+
+
+def materializeFeatureInstanceRefs(graph: Dict[str, Any]) -> Dict[str, Any]:
+ """Return a deep-copied graph with raw ``featureInstanceId`` strings rewritten
+ to typed ``FeatureInstanceRef`` envelopes.
+
+ The function never mutates its input. It is safe to call repeatedly
+ (idempotent) and on partial graphs (missing nodes, missing parameters).
+ """
+ if not isinstance(graph, dict):
+ return graph
+
+ out = copy.deepcopy(graph)
+ nodes = out.get("nodes")
+ if not isinstance(nodes, list):
+ return out
+
+ migratedCount = 0
+ for node in nodes:
+ if not isinstance(node, dict):
+ continue
+ params = node.get("parameters")
+ if not isinstance(params, dict):
+ continue
+ current = params.get("featureInstanceId")
+ if current is None:
+ continue
+ if _isAlreadyTypedEnvelope(current):
+ continue
+ if not _isMigratableUuidValue(current):
+ continue
+ envelope = _buildEnvelope(current, node.get("type") or "")
+ params["featureInstanceId"] = envelope
+ migratedCount += 1
+ logger.debug(
+ "materializeFeatureInstanceRefs: node %s (%s) -> envelope %r",
+ node.get("id"),
+ node.get("type"),
+ envelope,
+ )
+
+ if migratedCount:
+ logger.info(
+ "materializeFeatureInstanceRefs: migrated %d featureInstanceId value(s)",
+ migratedCount,
+ )
+ return out
+
+
+__all__ = ["materializeFeatureInstanceRefs"]
diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py
index 1cd2dc3e..1f01a57d 100644
--- a/modules/workflows/automation2/graphUtils.py
+++ b/modules/workflows/automation2/graphUtils.py
@@ -2,7 +2,7 @@
# Graph parsing, validation, and topological sort for automation2.
import logging
-from typing import Dict, List, Any, Tuple, Set
+from typing import Dict, List, Any, Tuple, Set, Optional
logger = logging.getLogger(__name__)
@@ -113,10 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
if nid not in nodeIds:
errors.append(f"Connection references non-existent node {nid}")
- # Soft port compatibility check (warnings, not errors)
- warnings = _checkPortCompatibility(nodes, connMap)
- if warnings:
- logger.info("validateGraph port warnings: %s", warnings)
+ # Port compatibility: hard-fail (Pick-not-Push typed graph)
+ port_errors = _checkPortCompatibility(nodes, connMap)
+ if port_errors:
+ logger.warning("validateGraph port mismatches: %s", port_errors)
+ errors.extend(port_errors)
if errors:
logger.debug("validateGraph errors: %s", errors)
@@ -125,19 +126,35 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
return errors
+def parse_graph_defined_schema(node: Dict[str, Any], parameter_key: str) -> Optional[Dict[str, Any]]:
+ """
+ Build a JSON-serializable port schema dict from graph parameters (e.g. form ``fields``).
+ Used by tooling and future API surfaces; mirrors ``parse_graph_defined_output_schema`` logic.
+ """
+ from modules.features.graphicalEditor.portTypes import _derive_form_payload_schema_from_param
+
+ sch = _derive_form_payload_schema_from_param(node, parameter_key)
+ if sch is None:
+ return None
+ return {
+ "name": sch.name,
+ "fields": [f.model_dump() for f in sch.fields],
+ }
+
+
def _checkPortCompatibility(
nodes: List[Dict],
connMap: Dict[str, List[Tuple[str, int, int]]],
) -> List[str]:
"""
- Soft check: warn if connected port types are incompatible.
- Returns warnings (never blocks execution).
+ Hard typed-port check: incompatible connections become validation errors.
"""
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+ from modules.features.graphicalEditor.portTypes import resolve_output_schema_name
nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES}
nodeById = {n["id"]: n for n in nodes if n.get("id")}
- warnings = []
+ warnings: List[str] = []
for tgt, pairs in connMap.items():
tgtNode = nodeById.get(tgt)
@@ -156,20 +173,28 @@ def _checkPortCompatibility(
if not srcDef:
continue
srcOutputPorts = srcDef.get("outputPorts", {})
- srcPort = srcOutputPorts.get(srcOut, {})
- tgtPort = tgtInputPorts.get(tgtIn, {})
+ srcPort = srcOutputPorts.get(srcOut, {}) or {}
+ tgtPort = tgtInputPorts.get(tgtIn, {}) or {}
- srcSchema = srcPort.get("schema", "")
+ if not isinstance(srcPort, dict):
+ continue
+ src_schema = resolve_output_schema_name(srcNode, srcPort)
accepts = tgtPort.get("accepts", [])
- if not accepts or not srcSchema:
+ if not accepts or not src_schema:
continue
- if "Transit" in accepts:
+ if src_schema in accepts:
continue
- if srcSchema not in accepts:
- warnings.append(
- f"Port mismatch: {src}[out:{srcOut}] ({srcSchema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})"
- )
+ # Port that only declares Transit behaves as an untyped sink (legacy graphs).
+ if len(accepts) == 1 and accepts[0] == "Transit":
+ continue
+ if src_schema == "FormPayload_dynamic" and "FormPayload" in accepts:
+ continue
+ if src_schema.startswith("FormPayload") and "FormPayload" in accepts:
+ continue
+ warnings.append(
+ f"Port mismatch: {src}[out:{srcOut}] ({src_schema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})"
+ )
return warnings
@@ -217,12 +242,35 @@ def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, in
return order
+_WILDCARD_SEGMENT = "*"
+
+
def _get_by_path(data: Any, path: List[Any]) -> Any:
- """Traverse data by path (strings and ints); return None if not found."""
+ """Traverse data by path (strings and ints); return None if not found.
+
+ Supports the iteration wildcard ``"*"`` as a path segment: when applied
+ to a list, the remainder of the path is mapped over each element and the
+ results are returned as a list (drops elements that resolve to ``None``).
+ This is the "typed Bindings-Resolver" iteration primitive defined for
+ Schicht 4 of the Typed Action Architecture.
+ """
current = data
- for seg in path:
+ for i, seg in enumerate(path):
if current is None:
return None
+ if isinstance(seg, str) and seg == _WILDCARD_SEGMENT:
+ if not isinstance(current, (list, tuple)):
+ return None
+ tail = list(path[i + 1 :])
+ if not tail:
+ return list(current)
+ mapped: List[Any] = []
+ for item in current:
+ resolved = _get_by_path(item, tail)
+ if resolved is None:
+ continue
+ mapped.append(resolved)
+ return mapped
if isinstance(current, dict) and isinstance(seg, str) and seg in current:
current = current[seg]
elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)):
@@ -236,6 +284,52 @@ def _get_by_path(data: Any, path: List[Any]) -> Any:
return current
+def _pathContainsWildcard(path: List[Any]) -> bool:
+ """True if any segment is the iteration wildcard ``"*"``."""
+ return any(isinstance(seg, str) and seg == _WILDCARD_SEGMENT for seg in path)
+
+
+# ---------------------------------------------------------------------------
+# Phase-5 Schicht-4 — Typed-Ref envelope unwrap
+# ---------------------------------------------------------------------------
+#
+# Workflow params can carry a typed-ref envelope like
+# ``{"$type": "FeatureInstanceRef", "id": "", "featureCode": "trustee"}``.
+# Action implementations historically receive the canonical primitive (the
+# referenced ``id``) as a string. ``_unwrapTypedRef`` extracts that primitive
+# without losing the typed envelope shape on disk — the migration script
+# (``featureInstanceRefMigration.materializeFeatureInstanceRefs``) writes the
+# envelope, the resolver unwraps it on its way to the action.
+
+_TYPED_REF_PRIMARY_FIELD = {
+ "FeatureInstanceRef": "id",
+ "ConnectionRef": "id",
+ "PromptTemplateRef": "id",
+ "ClickUpListRef": "listId",
+ "SharePointFileRef": "filePath",
+ "SharePointFolderRef": "folderPath",
+}
+
+
+def _isTypedRefEnvelope(value: Any) -> bool:
+ """True if ``value`` looks like a typed-ref envelope ({\"$type\": \"\", ...})."""
+ if not isinstance(value, dict):
+ return False
+ typeName = value.get("$type")
+ return isinstance(typeName, str) and typeName in _TYPED_REF_PRIMARY_FIELD
+
+
+def _unwrapTypedRef(value: Any) -> Any:
+ """If ``value`` is a typed-ref envelope, return its primary primitive.
+
+ Falls back to the original value for unknown / non-envelope inputs.
+ """
+ if not _isTypedRefEnvelope(value):
+ return value
+ primary = _TYPED_REF_PRIMARY_FIELD[value["$type"]]
+ return value.get(primary, value)
+
+
def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
"""
Resolve parameter references:
@@ -247,6 +341,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
import re
if isinstance(value, dict):
+ # Phase-5 Schicht-4: typed-ref envelopes (FeatureInstanceRef etc.) on
+ # disk get unwrapped to their canonical primitive (e.g. ``id``) so
+ # legacy action signatures keep working. See ``_unwrapTypedRef``.
+ if _isTypedRefEnvelope(value):
+ return _unwrapTypedRef(value)
if value.get("type") == "ref":
node_id = value.get("nodeId")
path = value.get("path")
diff --git a/modules/workflows/automation2/pickNotPushMigration.py b/modules/workflows/automation2/pickNotPushMigration.py
new file mode 100644
index 00000000..fe347761
--- /dev/null
+++ b/modules/workflows/automation2/pickNotPushMigration.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Graph helpers for Pick-not-Push: materialize connectionReference as explicit DataRefs.
+
+Runtime: executeGraph deep-copies the version graph and applies materializeConnectionRefs
+so downstream nodes resolve connection UUIDs from upstream output.connection.id.
+"""
+from __future__ import annotations
+
+import copy
+import logging
+from typing import Any, Dict, List
+
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.portTypes import resolve_output_schema_name
+from modules.workflows.automation2.graphUtils import buildConnectionMap, getInputSources
+
+logger = logging.getLogger(__name__)
+
+_NODE_DEF_BY_ID = {n["id"]: n for n in STATIC_NODE_TYPES}
+
+_SCHEMAS_WITH_CONNECTION = frozenset(
+ {"FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"},
+)
+
+
+def _data_ref(node_id: str, path: List[Any]) -> Dict[str, Any]:
+ return {"type": "ref", "nodeId": node_id, "path": list(path)}
+
+
+def materializeConnectionRefs(graph: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Deep-copy graph and set empty connectionReference (userConnection params) to
+ DataRef { nodeId: upstreamPort0, path: ['connection','id'] } when upstream
+ output schema carries connection provenance.
+ """
+ g = copy.deepcopy(graph)
+ nodes: List[Dict[str, Any]] = g.get("nodes") or []
+ connections = g.get("connections") or []
+ if not nodes:
+ return g
+
+ conn_map = buildConnectionMap(connections)
+ node_by_id = {n["id"]: n for n in nodes if n.get("id")}
+
+ for node in nodes:
+ nid = node.get("id")
+ ntype = node.get("type")
+ if not nid or not ntype:
+ continue
+ node_def = _NODE_DEF_BY_ID.get(ntype)
+ if not node_def:
+ continue
+ pdefs = node_def.get("parameters") or []
+ has_conn = any(
+ p.get("name") == "connectionReference" and p.get("frontendType") == "userConnection"
+ for p in pdefs
+ )
+ if not has_conn:
+ continue
+ params = node.get("parameters")
+ if not isinstance(params, dict):
+ node["parameters"] = {}
+ params = node["parameters"]
+ cur = params.get("connectionReference")
+ if cur not in (None, "", {}):
+ continue
+ input_sources = getInputSources(nid, conn_map)
+ if 0 not in input_sources:
+ continue
+ src_id, _ = input_sources[0]
+ src_node = node_by_id.get(src_id) or {}
+ src_def = _NODE_DEF_BY_ID.get(src_node.get("type") or "")
+ if not src_def:
+ continue
+ out_port = (src_def.get("outputPorts") or {}).get(0, {}) or {}
+ out_schema = resolve_output_schema_name(src_node, out_port if isinstance(out_port, dict) else {})
+ if out_schema not in _SCHEMAS_WITH_CONNECTION:
+ continue
+ params["connectionReference"] = _data_ref(src_id, ["connection", "id"])
+ logger.debug("materializeConnectionRefs: %s.connectionReference -> ref %s.connection.id", nid, src_id)
+
+ return g
diff --git a/modules/workflows/automation2/udmUpstreamShapes.py b/modules/workflows/automation2/udmUpstreamShapes.py
new file mode 100644
index 00000000..33dea176
--- /dev/null
+++ b/modules/workflows/automation2/udmUpstreamShapes.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Pure shape coercion for UDM-related upstream payloads (tests + optional tooling).
+
+No runtime wire-handover — kept only so unit tests can assert stable normalisation rules.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict
+
+
+def _coerceUdmDocumentInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ if upstream.get("children") is not None and upstream.get("sourceType"):
+ return upstream
+ udm = upstream.get("udm")
+ if isinstance(udm, dict) and udm.get("children") is not None:
+ return udm
+ return {}
+
+
+def _coerceUdmNodeListInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ nodes = upstream.get("nodes")
+ if isinstance(nodes, list):
+ return {"nodes": nodes, "count": len(nodes)}
+ children = upstream.get("children")
+ if isinstance(children, list):
+ return {"nodes": children, "count": len(children)}
+ return {}
+
+
+def _coerceConsolidateResultInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ result: Dict[str, Any] = {}
+ for key in ("result", "mode", "count"):
+ if key in upstream:
+ result[key] = upstream[key]
+ return result
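Read together, the helpers pin down these normalisation rules (asserts mirror the code above):

assert _coerceUdmNodeListInput({"nodes": [1, 2]}) == {"nodes": [1, 2], "count": 2}
assert _coerceUdmNodeListInput({"children": [1]}) == {"nodes": [1], "count": 1}
assert _coerceUdmNodeListInput({}) == {}
assert _coerceConsolidateResultInput({"result": "r", "count": 3, "extra": 1}) == {"result": "r", "count": 3}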
diff --git a/modules/workflows/methods/_actionSignatureValidator.py b/modules/workflows/methods/_actionSignatureValidator.py
new file mode 100644
index 00000000..942ccb8a
--- /dev/null
+++ b/modules/workflows/methods/_actionSignatureValidator.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Action signature validator for the Typed Action Architecture (Phase 2).
+
+Verifies that every WorkflowActionDefinition exposed by a Method:
+ 1. Declares a parameter `type` that is either a primitive or a known
+ PORT_TYPE_CATALOG schema name.
+ 2. Declares an `outputType` that exists in PORT_TYPE_CATALOG.
+ 3. Declares container types (`List[X]`, `Dict[K,V]`) whose element types
+ are also primitives or catalog schemas.
+
+Used at startup (and in CI tests) to prevent silent drift between
+backend method signatures and the type catalog.
+
+Plan: wiki/c-work/1-plan/2026-04-typed-action-architecture.md
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import Dict, Iterable, List, Optional
+
+from modules.datamodels.datamodelWorkflowActions import (
+ WorkflowActionDefinition,
+ WorkflowActionParameter,
+)
+from modules.features.graphicalEditor.portTypes import (
+ PORT_TYPE_CATALOG,
+ PRIMITIVE_TYPES,
+ _stripContainer,
+)
+
+logger = logging.getLogger(__name__)
+
+
+# Catalog types that are explicitly allowed as fire-and-forget outputs
+# (no typed payload expected by downstream nodes).
+_ALLOWED_GENERIC_OUTPUTS = frozenset({"ActionResult", "Transit"})
+
+
+def _isKnownType(typeName: str) -> bool:
+ """Primitive or catalog-resolvable type name."""
+ return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG
+
+
+def _validateTypeRef(typeStr: str) -> List[str]:
+ """
+ Validate a single type reference string (the value of `type` on a
+ WorkflowActionParameter or `outputType` on a WorkflowActionDefinition).
+
+ Returns a list of human-readable error fragments (empty if OK).
+ """
+ if not typeStr or not isinstance(typeStr, str):
+ return ["empty/non-string type"]
+
+ # Backwards-compatible aliases (lowercase Python builtins)
+ if typeStr in {"list", "dict"}:
+ return [
+ f"'{typeStr}' is too generic — use 'List[X]' / 'Dict[K,V]' or a "
+ f"catalog schema name"
+ ]
+
+ parts = _stripContainer(typeStr)
+ if not parts:
+ return [f"could not parse type '{typeStr}'"]
+
+ errors: List[str] = []
+ for part in parts:
+ if not _isKnownType(part):
+ errors.append(
+ f"unknown type '{part}' (not a primitive and not in catalog)"
+ )
+ return errors
+
+
+def _validateActionParameter(
+ actionId: str,
+ paramName: str,
+ param: WorkflowActionParameter,
+) -> List[str]:
+ """Validate a single parameter; returns prefixed error messages."""
+ out: List[str] = []
+ for err in _validateTypeRef(param.type):
+ out.append(f"{actionId}.{paramName}: {err}")
+ return out
+
+
+def _validateActionDefinition(
+ actionDef: WorkflowActionDefinition,
+) -> List[str]:
+ """Validate parameters + outputType of one action."""
+ errors: List[str] = []
+ actionId = actionDef.actionId or ""
+
+ for paramName, param in (actionDef.parameters or {}).items():
+ errors.extend(_validateActionParameter(actionId, paramName, param))
+
+ outputType = actionDef.outputType
+ if outputType not in _ALLOWED_GENERIC_OUTPUTS:
+ for err in _validateTypeRef(outputType):
+ errors.append(f"{actionId}.: {err}")
+
+ return errors
+
+
+def _validateActionsDict(
+ methodName: str,
+ actions: Dict[str, WorkflowActionDefinition],
+) -> List[str]:
+ """Validate every action in a Method's _actions dict."""
+ errors: List[str] = []
+ if not actions:
+ return errors
+ for localName, actionDef in actions.items():
+ if not isinstance(actionDef, WorkflowActionDefinition):
+ errors.append(
+ f"{methodName}.{localName}: not a WorkflowActionDefinition instance"
+ )
+ continue
+ errors.extend(_validateActionDefinition(actionDef))
+ return errors
+
+
+# ---------------------------------------------------------------------------
+# Public entry points
+# ---------------------------------------------------------------------------
+
+def _validateMethods(methodInstances: Iterable) -> List[str]:
+ """
+ Validate a sequence of Method instances.
+
+ Each instance is expected to expose `_actions` (Dict[str, WorkflowActionDefinition]).
+ """
+ errors: List[str] = []
+ for method in methodInstances:
+ methodName = getattr(method, "name", method.__class__.__name__)
+ actions = getattr(method, "_actions", None) or {}
+ errors.extend(_validateActionsDict(methodName, actions))
+ return errors
+
+
+def _formatValidationReport(errors: List[str]) -> str:
+ """Build a multi-line human-readable error report."""
+ if not errors:
+ return "Action signatures are healthy."
+ lines = [f"Found {len(errors)} action-signature drift(s):"]
+ lines.extend(f" - {e}" for e in errors)
+ return "\n".join(lines)
+
+
+def _logValidationReport(errors: List[str], strict: bool = False) -> None:
+ """
+ Log validation results.
+
+ If `strict=True`, raises RuntimeError on any error (use in tests / CI).
+ Otherwise emits warnings (use at startup so the app keeps running but
+ operators see the drift in the log).
+ """
+ report = _formatValidationReport(errors)
+ if errors:
+ if strict:
+ raise RuntimeError(report)
+ logger.warning(report)
+ else:
+ logger.info(report)
+
+
+__all__ = [
+ "_validateMethods",
+ "_validateActionsDict",
+ "_validateActionDefinition",
+ "_validateActionParameter",
+ "_validateTypeRef",
+ "_formatValidationReport",
+ "_logValidationReport",
+]
diff --git a/modules/workflows/methods/methodAi/methodAi.py b/modules/workflows/methods/methodAi/methodAi.py
index eac1babe..5265f5c9 100644
--- a/modules/workflows/methods/methodAi/methodAi.py
+++ b/modules/workflows/methods/methodAi/methodAi.py
@@ -39,17 +39,19 @@ class MethodAi(MethodBase):
actionId="ai.process",
description="Universal AI document processing action - accepts multiple input documents in any format and processes them together with a prompt. If the prompt specifies document formats to deliver, include them in the prompt",
dynamicMode=True,
+ outputType="AiResult",
parameters={
"aiPrompt": WorkflowActionParameter(
name="aiPrompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=True,
description="Instruction for the AI describing what processing to perform"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document reference(s) in any format to use as input/context"
@@ -82,7 +84,7 @@ class MethodAi(MethodBase):
),
"contentParts": WorkflowActionParameter(
name="contentParts",
- type="List[ContentPart]",
+ type="List[Any]",
frontendType=FrontendType.HIDDEN,
required=False,
description="Pre-extracted content parts (internal parameter, typically passed between actions). If provided, these will be used instead of extracting from documentList. Can be a list of ContentPart objects or an object with a 'parts' attribute."
@@ -94,10 +96,12 @@ class MethodAi(MethodBase):
actionId="ai.webResearch",
description="Web research with two-step process: search for URLs, then crawl content",
dynamicMode=True,
+ outputType="AiResult",
parameters={
"prompt": WorkflowActionParameter(
name="prompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=True,
description="Natural language research instruction"
@@ -140,10 +144,11 @@ class MethodAi(MethodBase):
actionId="ai.summarizeDocument",
description="Summarize one or more documents, extracting key points and main ideas. If the prompt specifies document formats to deliver, include them in the prompt",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to summarize"
@@ -180,10 +185,11 @@ class MethodAi(MethodBase):
actionId="ai.translateDocument",
description="Translate documents to a target language while preserving formatting and structure",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to translate"
@@ -224,10 +230,11 @@ class MethodAi(MethodBase):
actionId="ai.convertDocument",
description="Convert documents between different formats (PDF→Word, Excel→CSV, etc.)",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to convert"
@@ -255,17 +262,19 @@ class MethodAi(MethodBase):
actionId="ai.generateDocument",
description="Generate documents from scratch or based on templates/inputs. If the prompt specifies document formats to deliver, include them in the prompt",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"prompt": WorkflowActionParameter(
name="prompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=True,
description="Description of the document to generate"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Template documents or reference documents to use as a guide"
@@ -293,17 +302,19 @@ class MethodAi(MethodBase):
actionId="ai.generateCode",
description="Generate one or multiple code files in a single action - explicitly sets intent to 'code'. This action can generate multiple files (e.g., config.json, customers.json, settings.json) when the prompt requests multiple files. If the prompt specifies file formats to deliver, include them in the prompt. IMPORTANT: When the user requests multiple files (e.g., 'generate 3 JSON files'), use a SINGLE ai.generateCode action with a prompt that describes ALL requested files, rather than splitting into multiple actions.",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"prompt": WorkflowActionParameter(
name="prompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=True,
description="Description of code to generate. If multiple files are requested, describe ALL files in this single prompt (e.g., 'Generate 3 JSON files: 1) config.json with..., 2) customers.json with..., 3) settings.json with...')."
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Reference documents"
@@ -323,6 +334,7 @@ class MethodAi(MethodBase):
actionId="ai.consolidate",
description="AI-assisted consolidation of aggregated workflow results (summarize, classify, semantic merge)",
dynamicMode=True,
+ outputType="ConsolidateResult",
parameters={
"mode": WorkflowActionParameter(
name="mode",
@@ -336,6 +348,7 @@ class MethodAi(MethodBase):
"prompt": WorkflowActionParameter(
name="prompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=False,
description="Optional extra instructions for the LLM",
diff --git a/modules/workflows/methods/methodBase.py b/modules/workflows/methods/methodBase.py
index 6a9f2956..02cae134 100644
--- a/modules/workflows/methods/methodBase.py
+++ b/modules/workflows/methods/methodBase.py
@@ -176,6 +176,7 @@ class MethodBase:
'default': param.default,
'frontendType': param.frontendType.value,
'frontendOptions': param.frontendOptions,
+ 'uiHint': param.uiHint,
'validation': param.validation
}
return result
@@ -230,8 +231,18 @@ class MethodBase:
return validated
def _validateType(self, value: Any, expectedType: str) -> Any:
- """Validate and convert value to expected type"""
- # Type validation logic
+ """Validate and convert value to expected type.
+
+ Catalog types (e.g. 'ConnectionRef', 'FeatureInstanceRef',
+ 'DocumentList', 'TrusteeProcessResult') pass through unchanged —
+ runtime structural validation is handled by the workflow engine /
+ port-schema layer, not at the action-call boundary.
+ """
+ from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
+
+ if expectedType in PORT_TYPE_CATALOG:
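+            # e.g. a typed envelope such as {"$type": "ConnectionRef", "id": "..."}
+            # (illustrative shape) is forwarded unchanged.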
+ return value
+
typeMap = {
'str': str,
'int': int,
@@ -239,8 +250,12 @@ class MethodBase:
'bool': bool,
'list': list,
'dict': dict,
}
-
+
+ if expectedType == 'Any':
+ return value
+
# Handle List[str], List[int], etc.
if expectedType.startswith('List['):
if isinstance(value, str):
diff --git a/modules/workflows/methods/methodChatbot/methodChatbot.py b/modules/workflows/methods/methodChatbot/methodChatbot.py
index 4583e636..cc44428e 100644
--- a/modules/workflows/methods/methodChatbot/methodChatbot.py
+++ b/modules/workflows/methods/methodChatbot/methodChatbot.py
@@ -25,17 +25,19 @@ class MethodChatbot(MethodBase):
actionId="chatbot.queryDatabase",
description="Execute a SQL SELECT query via the preprocessor connector. Returns formatted query results.",
dynamicMode=False,
+ outputType="QueryResult",
parameters={
"sqlQuery": WorkflowActionParameter(
name="sqlQuery",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=False,
description="SQL SELECT query to execute. If not provided, will attempt to extract from analysis_result document in documentList."
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document reference(s) containing analysis_result with sqlQuery field. Used if sqlQuery parameter is not provided."
diff --git a/modules/workflows/methods/methodClickup/methodClickup.py b/modules/workflows/methods/methodClickup/methodClickup.py
index 05eba50d..17f42300 100644
--- a/modules/workflows/methods/methodClickup/methodClickup.py
+++ b/modules/workflows/methods/methodClickup/methodClickup.py
@@ -34,10 +34,11 @@ class MethodClickup(MethodBase):
actionId="clickup.listTasks",
description="List tasks in a ClickUp list (virtual path /team/{id}/list/{id})",
dynamicMode=True,
+ outputType="TaskList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -72,10 +73,11 @@ class MethodClickup(MethodBase):
actionId="clickup.listFields",
description="List custom and built-in field definitions for a ClickUp list (names, types, ids)",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -101,10 +103,11 @@ class MethodClickup(MethodBase):
actionId="clickup.searchTasks",
description="Search tasks in a ClickUp workspace (team)",
dynamicMode=True,
+ outputType="TaskList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -172,10 +175,11 @@ class MethodClickup(MethodBase):
actionId="clickup.getTask",
description="Get a single task by ID",
dynamicMode=True,
+ outputType="TaskResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -201,10 +205,11 @@ class MethodClickup(MethodBase):
actionId="clickup.createTask",
description="Create a task in a list",
dynamicMode=True,
+ outputType="TaskResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -300,10 +305,11 @@ class MethodClickup(MethodBase):
actionId="clickup.updateTask",
description="Update a task (JSON body per ClickUp API)",
dynamicMode=True,
+ outputType="TaskResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
@@ -336,10 +342,11 @@ class MethodClickup(MethodBase):
actionId="clickup.uploadAttachment",
description="Upload a file attachment to a task",
dynamicMode=True,
+ outputType="TaskAttachmentRef",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="ClickUp connection",
diff --git a/modules/workflows/methods/methodContext/methodContext.py b/modules/workflows/methods/methodContext/methodContext.py
index 61afaf2e..d5f62772 100644
--- a/modules/workflows/methods/methodContext/methodContext.py
+++ b/modules/workflows/methods/methodContext/methodContext.py
@@ -36,6 +36,7 @@ class MethodContext(MethodBase):
actionId="context.getDocumentIndex",
description="Generate a comprehensive index of all documents available in the current workflow",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"resultType": WorkflowActionParameter(
name="resultType",
@@ -53,17 +54,18 @@ class MethodContext(MethodBase):
actionId="context.extractContent",
description="Extract raw content parts from documents without AI processing. Returns ContentParts with different typeGroups (text, image, table, structure, container). Images are returned as base64 data, not as extracted text. Text content is extracted from text-based formats (PDF text layers, Word docs, etc.) but NOT from images (no OCR). Use this action to prepare documents for subsequent AI processing actions.",
dynamicMode=True,
+ outputType="UdmDocument",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to extract content from"
),
"extractionOptions": WorkflowActionParameter(
name="extractionOptions",
- type="dict",
+ type="Dict[str,Any]",
frontendType=FrontendType.JSON,
required=False,
description="Extraction options (if not provided, defaults are used). Note: This action does NOT use AI - it performs pure content extraction. Images are preserved as base64 data, not converted to text."
@@ -74,10 +76,11 @@ class MethodContext(MethodBase):
"neutralizeData": WorkflowActionDefinition(
actionId="context.neutralizeData",
description="Neutralize extracted data from ContentExtracted documents (for use after extractContent)",
+ outputType="DocumentList",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) containing ContentExtracted objects to neutralize"
@@ -88,6 +91,7 @@ class MethodContext(MethodBase):
"triggerPreprocessingServer": WorkflowActionDefinition(
actionId="context.triggerPreprocessingServer",
description="Trigger preprocessing server at customer tenant to update database with configuration",
+ outputType="ActionResult",
parameters={
"endpoint": WorkflowActionParameter(
name="endpoint",
diff --git a/modules/workflows/methods/methodFile/methodFile.py b/modules/workflows/methods/methodFile/methodFile.py
index 072ca598..8724ab11 100644
--- a/modules/workflows/methods/methodFile/methodFile.py
+++ b/modules/workflows/methods/methodFile/methodFile.py
@@ -24,10 +24,11 @@ class MethodFile(MethodBase):
actionId="file.create",
description="Create a file from context (text/markdown from AI). Configurable format and style preset.",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"contentSources": WorkflowActionParameter(
name="contentSources",
- type="list",
+ type="List[str]",
frontendType=FrontendType.HIDDEN,
required=False,
description="Array of context refs. Resolved and concatenated. Empty = from connected node.",
diff --git a/modules/workflows/methods/methodJira/methodJira.py b/modules/workflows/methods/methodJira/methodJira.py
index d7baacf0..0268d020 100644
--- a/modules/workflows/methods/methodJira/methodJira.py
+++ b/modules/workflows/methods/methodJira/methodJira.py
@@ -42,6 +42,7 @@ class MethodJira(MethodBase):
"connectJira": WorkflowActionDefinition(
actionId="jira.connectJira",
description="Connect to JIRA instance and create ticket interface",
+ outputType="ActionResult",
parameters={
"apiUsername": WorkflowActionParameter(
name="apiUsername",
@@ -81,6 +82,7 @@ class MethodJira(MethodBase):
"taskSyncDefinition": WorkflowActionParameter(
name="taskSyncDefinition",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=True,
description="Field mapping definition as JSON string or dict"
@@ -91,6 +93,7 @@ class MethodJira(MethodBase):
"exportTicketsAsJson": WorkflowActionDefinition(
actionId="jira.exportTicketsAsJson",
description="Export tickets from JIRA as JSON list",
+ outputType="DocumentList",
parameters={
"connectionId": WorkflowActionParameter(
name="connectionId",
@@ -112,6 +115,7 @@ class MethodJira(MethodBase):
"importTicketsFromJson": WorkflowActionDefinition(
actionId="jira.importTicketsFromJson",
description="Import ticket data from JSON back to JIRA",
+ outputType="ActionResult",
parameters={
"connectionId": WorkflowActionParameter(
name="connectionId",
@@ -122,7 +126,7 @@ class MethodJira(MethodBase):
),
"ticketData": WorkflowActionParameter(
name="ticketData",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing ticket data as JSON"
@@ -140,17 +144,18 @@ class MethodJira(MethodBase):
"mergeTicketData": WorkflowActionDefinition(
actionId="jira.mergeTicketData",
description="Merge JIRA export data with existing SharePoint data",
+ outputType="DocumentList",
parameters={
"jiraData": WorkflowActionParameter(
name="jiraData",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing JIRA ticket data as JSON array"
),
"existingData": WorkflowActionParameter(
name="existingData",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing existing SharePoint data as JSON array"
@@ -176,10 +181,11 @@ class MethodJira(MethodBase):
"parseCsvContent": WorkflowActionDefinition(
actionId="jira.parseCsvContent",
description="Parse CSV content with custom headers",
+ outputType="DocumentList",
parameters={
"csvContent": WorkflowActionParameter(
name="csvContent",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing CSV file content as bytes"
@@ -207,10 +213,11 @@ class MethodJira(MethodBase):
"parseExcelContent": WorkflowActionDefinition(
actionId="jira.parseExcelContent",
description="Parse Excel content with custom headers",
+ outputType="DocumentList",
parameters={
"excelContent": WorkflowActionParameter(
name="excelContent",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing Excel file content as bytes"
@@ -238,17 +245,18 @@ class MethodJira(MethodBase):
"createCsvContent": WorkflowActionDefinition(
actionId="jira.createCsvContent",
description="Create CSV content with custom headers",
+ outputType="DocumentList",
parameters={
"data": WorkflowActionParameter(
name="data",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing data as JSON (with data field from mergeTicketData)"
),
"headers": WorkflowActionParameter(
name="headers",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document reference containing headers JSON (from parseCsvContent/parseExcelContent)"
@@ -273,17 +281,18 @@ class MethodJira(MethodBase):
"createExcelContent": WorkflowActionDefinition(
actionId="jira.createExcelContent",
description="Create Excel content with custom headers",
+ outputType="DocumentList",
parameters={
"data": WorkflowActionParameter(
name="data",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference containing data as JSON (with data field from mergeTicketData)"
),
"headers": WorkflowActionParameter(
name="headers",
- type="str",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document reference containing headers JSON (from parseExcelContent)"
diff --git a/modules/workflows/methods/methodOutlook/methodOutlook.py b/modules/workflows/methods/methodOutlook/methodOutlook.py
index 633f396d..4370b237 100644
--- a/modules/workflows/methods/methodOutlook/methodOutlook.py
+++ b/modules/workflows/methods/methodOutlook/methodOutlook.py
@@ -40,10 +40,11 @@ class MethodOutlook(MethodBase):
actionId="outlook.readEmails",
description="Read emails and metadata from a mailbox folder",
dynamicMode=True,
+ outputType="EmailList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -89,10 +90,11 @@ class MethodOutlook(MethodBase):
actionId="outlook.searchEmails",
description="Search emails by query and return matching items with metadata",
dynamicMode=True,
+ outputType="EmailList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -138,10 +140,11 @@ class MethodOutlook(MethodBase):
actionId="outlook.composeAndDraftEmailWithContext",
description="Compose email content using AI from context and optional documents, then create a draft",
dynamicMode=True,
+ outputType="EmailDraft",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -156,20 +159,21 @@ class MethodOutlook(MethodBase):
"context": WorkflowActionParameter(
name="context",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=False,
description="Detailed context for AI composition (omit when emailContent provided)"
),
"emailContent": WorkflowActionParameter(
name="emailContent",
- type="dict",
+ type="Dict[str,Any]",
frontendType=FrontendType.HIDDEN,
required=False,
description="Direct subject/body/to from upstream (skips AI composition)"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[Any]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document references or inline ActionDocuments for attachments"
@@ -213,17 +217,18 @@ class MethodOutlook(MethodBase):
actionId="outlook.sendDraftEmail",
description="Send draft email(s) using draft email JSON document(s) from action outlook.composeAndDraftEmailWithContext",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to draft emails in JSON format (outputs from outlook.composeAndDraftEmailWithContext function)"
diff --git a/modules/workflows/methods/methodRedmine/methodRedmine.py b/modules/workflows/methods/methodRedmine/methodRedmine.py
index 0dd8f461..6c40c951 100644
--- a/modules/workflows/methods/methodRedmine/methodRedmine.py
+++ b/modules/workflows/methods/methodRedmine/methodRedmine.py
@@ -43,10 +43,11 @@ class MethodRedmine(MethodBase):
actionId="redmine.readTicket",
description="Read a single Redmine ticket from the local mirror by ticketId.",
dynamicMode=False,
+ outputType="RedmineTicket",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"ticketId": WorkflowActionParameter(
name="ticketId", type="int", frontendType=FrontendType.TEXT,
@@ -59,13 +60,14 @@ class MethodRedmine(MethodBase):
actionId="redmine.listTickets",
description="List tickets from the mirror with optional filters (tracker, status, period, assignee).",
dynamicMode=False,
+ outputType="RedmineTicketList",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"trackerIds": WorkflowActionParameter(
- name="trackerIds", type="list", frontendType=FrontendType.JSON,
+ name="trackerIds", type="List[int]", frontendType=FrontendType.JSON,
required=False, description="Restrict to these tracker ids (list of int or comma-separated string).",
),
"status": WorkflowActionParameter(
@@ -95,10 +97,11 @@ class MethodRedmine(MethodBase):
actionId="redmine.createTicket",
description="Create a new Redmine ticket. Requires subject and trackerId.",
dynamicMode=False,
+ outputType="RedmineTicket",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"subject": WorkflowActionParameter(
name="subject", type="str", frontendType=FrontendType.TEXT,
@@ -109,7 +112,7 @@ class MethodRedmine(MethodBase):
required=True, description="Tracker id (Userstory, Feature, Task ...).",
),
"description": WorkflowActionParameter(
- name="description", type="str", frontendType=FrontendType.TEXTAREA,
+ name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
required=False, description="Markdown/Textile description body.",
),
"statusId": WorkflowActionParameter(
@@ -133,7 +136,7 @@ class MethodRedmine(MethodBase):
required=False, description="Target/fixed version id.",
),
"customFields": WorkflowActionParameter(
- name="customFields", type="dict", frontendType=FrontendType.JSON,
+ name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON,
required=False, description="Custom fields as {customFieldId: value}.",
),
},
@@ -143,10 +146,11 @@ class MethodRedmine(MethodBase):
actionId="redmine.updateTicket",
description="Update a Redmine ticket. Only provided fields are sent.",
dynamicMode=False,
+ outputType="RedmineTicket",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"ticketId": WorkflowActionParameter(
name="ticketId", type="int", frontendType=FrontendType.TEXT,
@@ -157,7 +161,7 @@ class MethodRedmine(MethodBase):
required=False, description="New title.",
),
"description": WorkflowActionParameter(
- name="description", type="str", frontendType=FrontendType.TEXTAREA,
+ name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
required=False, description="New description.",
),
"trackerId": WorkflowActionParameter(
@@ -185,11 +189,11 @@ class MethodRedmine(MethodBase):
required=False, description="Change fixed version.",
),
"notes": WorkflowActionParameter(
- name="notes", type="str", frontendType=FrontendType.TEXTAREA,
+ name="notes", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
required=False, description="Journal entry (comment) added to the ticket.",
),
"customFields": WorkflowActionParameter(
- name="customFields", type="dict", frontendType=FrontendType.JSON,
+ name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON,
required=False, description="Custom fields as {customFieldId: value}.",
),
},
@@ -199,10 +203,11 @@ class MethodRedmine(MethodBase):
actionId="redmine.getStats",
description="Aggregated stats (KPIs, throughput, status distribution, backlog) from the mirror.",
dynamicMode=False,
+ outputType="RedmineStats",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"dateFrom": WorkflowActionParameter(
name="dateFrom", type="str", frontendType=FrontendType.TEXT,
@@ -217,7 +222,7 @@ class MethodRedmine(MethodBase):
required=False, description="'day' | 'week' | 'month' (default 'week').",
),
"trackerIds": WorkflowActionParameter(
- name="trackerIds", type="list", frontendType=FrontendType.JSON,
+ name="trackerIds", type="List[int]", frontendType=FrontendType.JSON,
required=False, description="Restrict to these tracker ids.",
),
},
@@ -227,10 +232,11 @@ class MethodRedmine(MethodBase):
actionId="redmine.runSync",
description="Sync Redmine tickets and relations into the local mirror (incremental by default).",
dynamicMode=False,
+ outputType="ActionResult",
parameters={
"featureInstanceId": WorkflowActionParameter(
- name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
- required=True, description="Redmine feature instance ID",
+ name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
+ required=True, description="Redmine feature instance",
),
"force": WorkflowActionParameter(
name="force", type="bool", frontendType=FrontendType.CHECKBOX,
diff --git a/modules/workflows/methods/methodSharepoint/methodSharepoint.py b/modules/workflows/methods/methodSharepoint/methodSharepoint.py
index 0fa0aca8..78e462d7 100644
--- a/modules/workflows/methods/methodSharepoint/methodSharepoint.py
+++ b/modules/workflows/methods/methodSharepoint/methodSharepoint.py
@@ -51,10 +51,11 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.findDocumentPath",
description="Find documents and folders by name/path across sites",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -89,17 +90,18 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.readDocuments",
description="Read documents from SharePoint and extract content/metadata",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document list reference(s) containing findDocumentPath result"
@@ -126,17 +128,18 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.uploadDocument",
description="Upload documents to SharePoint",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document reference(s) to upload. File names are taken from the documents"
@@ -155,17 +158,18 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.listDocuments",
description="List documents and folders in SharePoint paths across sites",
dynamicMode=True,
+ outputType="FileList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=False,
description="Document list reference(s) containing findDocumentPath result"
@@ -192,17 +196,18 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.analyzeFolderUsage",
description="Analyze usage intensity of folders and files in SharePoint",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
),
"documentList": WorkflowActionParameter(
name="documentList",
- type="List[str]",
+ type="DocumentList",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
description="Document list reference(s) containing findDocumentPath result"
@@ -237,10 +242,11 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.findSiteByUrl",
description="Find SharePoint site by hostname and site path",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -266,10 +272,11 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.downloadFileByPath",
description="Download file from SharePoint by exact file path",
dynamicMode=True,
+ outputType="DocumentList",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -302,10 +309,11 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.copyFile",
description="Copy file within SharePoint",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
@@ -331,10 +339,11 @@ class MethodSharepoint(MethodBase):
actionId="sharepoint.uploadFile",
description="Upload raw file content (bytes) to SharePoint",
dynamicMode=True,
+ outputType="ActionResult",
parameters={
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=True,
description="Microsoft connection label"
diff --git a/modules/workflows/methods/methodTrustee/actions/processDocuments.py b/modules/workflows/methods/methodTrustee/actions/processDocuments.py
index 0d29c9bd..11e9aba1 100644
--- a/modules/workflows/methods/methodTrustee/actions/processDocuments.py
+++ b/modules/workflows/methods/methodTrustee/actions/processDocuments.py
@@ -2,10 +2,15 @@
# All rights reserved.
"""
Process extracted documents: create TrusteeDocument + TrusteePosition from extraction JSON.
-Input: documentList (reference to extractFromFiles result).
-Each document is JSON with documentType, extractedData, fileId, fileName.
-extractedData is a list of expense/position records.
-Output: one ActionDocument with JSON { positionIds, documentIds } for chaining to syncToAccounting.
+
+Input: documentList (DataRef on upstream DocumentList.documents — typically
+trustee.extractFromFiles[documents]). Each item is an ActionDocument-dump dict
+with `documentData` (JSON string) carrying { documentType, extractedData, fileId,
+fileName }. extractedData is a list of expense/position records.
+
+Output: ActionResult with one ActionDocument containing JSON
+{ positionIds, documentIds, autoMatchedPositionIds } for chaining to
+syncToAccounting (via DataRef on documents[0]).
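+
+Illustrative input item (field names as consumed by _resolveDocumentList;
+values made up)::
+
+    {"documentName": "receipt-07.pdf",
+     "mimeType": "application/json",
+     "documentData": "{\"documentType\": \"expense\", \"extractedData\": [...]}"}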
"""
import json
diff --git a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
index 555a8623..b9c99f2c 100644
--- a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
+++ b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
@@ -2,8 +2,10 @@
# All rights reserved.
"""
Sync trustee positions to accounting (Buha).
-Input: featureInstanceId, documentList (reference to processDocuments result message).
-Reads positionIds from the document and calls AccountingBridge.pushBatchToAccounting.
+
+Input: featureInstanceId, documentList (DataRef on processDocuments[documents] —
+list with one ActionDocument carrying JSON { positionIds, documentIds, ... }).
+Reads positionIds from the first document and calls AccountingBridge.pushBatchToAccounting.
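+
+Illustrative payload of that first document (values made up)::
+
+    {"positionIds": [101, 102], "documentIds": [11], "autoMatchedPositionIds": []}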
"""
import json
diff --git a/modules/workflows/methods/methodTrustee/methodTrustee.py b/modules/workflows/methods/methodTrustee/methodTrustee.py
index ceb5849f..73e7d573 100644
--- a/modules/workflows/methods/methodTrustee/methodTrustee.py
+++ b/modules/workflows/methods/methodTrustee/methodTrustee.py
@@ -31,17 +31,21 @@ class MethodTrustee(MethodBase):
actionId="trustee.extractFromFiles",
description="Extract document type and data from PDF/JPG (fileIds or SharePoint folder)",
dynamicMode=False,
+ # Runtime returns ActionResult.isSuccess(documents=[...]); see
+ # actions/extractFromFiles.py. Keep this in sync with the
+ # graphical-editor adapter (nodeDefinitions/trustee.py).
+ outputType="ActionResult",
parameters={
"fileIds": WorkflowActionParameter(
name="fileIds",
- type="list",
+ type="List[str]",
frontendType=FrontendType.JSON,
required=False,
description="List of file IDs already in DB (alternative to connectionReference + sharepointFolder)",
),
"connectionReference": WorkflowActionParameter(
name="connectionReference",
- type="str",
+ type="ConnectionRef",
frontendType=FrontendType.USER_CONNECTION,
required=False,
description="Microsoft connection for SharePoint (use with sharepointFolder)",
@@ -55,14 +59,15 @@ class MethodTrustee(MethodBase):
),
"featureInstanceId": WorkflowActionParameter(
name="featureInstanceId",
- type="str",
+ type="FeatureInstanceRef",
frontendType=FrontendType.TEXT,
required=True,
- description="Trustee feature instance ID",
+ description="Trustee feature instance",
),
"prompt": WorkflowActionParameter(
name="prompt",
type="str",
+ uiHint="textarea",
frontendType=FrontendType.TEXTAREA,
required=False,
description="AI prompt for extraction (optional)",
@@ -74,20 +79,24 @@ class MethodTrustee(MethodBase):
actionId="trustee.processDocuments",
description="Create TrusteeDocument + TrusteePosition from extraction result (documentList from previous action)",
dynamicMode=False,
+ # Runtime returns ActionResult.isSuccess(documents=[...]).
+ outputType="ActionResult",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="list",
+ # Concrete shape consumed by _resolveDocumentList (list
+ # of dicts with documentName/documentData/mimeType).
+ type="List[ActionDocument]",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
- description="Reference to extractFromFiles result (e.g. docList:messageId:extract_result)",
+ description="DataRef to upstream documents (e.g. trustee.extractFromFiles → documents)",
),
"featureInstanceId": WorkflowActionParameter(
name="featureInstanceId",
- type="str",
+ type="FeatureInstanceRef",
frontendType=FrontendType.TEXT,
required=True,
- description="Trustee feature instance ID",
+ description="Trustee feature instance",
),
},
execute=processDocuments.__get__(self, self.__class__),
@@ -96,20 +105,24 @@ class MethodTrustee(MethodBase):
actionId="trustee.syncToAccounting",
description="Push trustee positions to accounting (documentList = processDocuments result)",
dynamicMode=False,
+ # Runtime returns ActionResult.isSuccess(documents=[...]).
+ outputType="ActionResult",
parameters={
"documentList": WorkflowActionParameter(
name="documentList",
- type="list",
+ # Concrete shape consumed by syncToAccounting._resolveDocumentList:
+ # list of ActionDocument dicts produced by processDocuments.
+ type="List[ActionDocument]",
frontendType=FrontendType.DOCUMENT_REFERENCE,
required=True,
- description="Reference to processDocuments result message",
+ description="DataRef to upstream documents (e.g. trustee.processDocuments → documents)",
),
"featureInstanceId": WorkflowActionParameter(
name="featureInstanceId",
- type="str",
+ type="FeatureInstanceRef",
frontendType=FrontendType.TEXT,
required=True,
- description="Trustee feature instance ID",
+ description="Trustee feature instance",
),
},
execute=syncToAccounting.__get__(self, self.__class__),
@@ -118,13 +131,14 @@ class MethodTrustee(MethodBase):
actionId="trustee.refreshAccountingData",
description="Import/refresh accounting data from external system (e.g. Abacus) into local tables. Checks cache freshness; use forceRefresh to re-import.",
dynamicMode=True,
+ outputType="TrusteeRefreshResult",
parameters={
"featureInstanceId": WorkflowActionParameter(
name="featureInstanceId",
- type="str",
+ type="FeatureInstanceRef",
frontendType=FrontendType.TEXT,
required=True,
- description="Trustee feature instance ID",
+ description="Trustee feature instance",
),
"forceRefresh": WorkflowActionParameter(
name="forceRefresh",
@@ -154,13 +168,14 @@ class MethodTrustee(MethodBase):
actionId="trustee.queryData",
description="Read data from the Trustee DB (lookup tenant+rent, raw recordset, or aggregate). Does NOT trigger an external sync.",
dynamicMode=False,
+ outputType="QueryResult",
parameters={
"featureInstanceId": WorkflowActionParameter(
name="featureInstanceId",
- type="str",
+ type="FeatureInstanceRef",
frontendType=FrontendType.TEXT,
required=True,
- description="Trustee feature instance ID",
+ description="Trustee feature instance",
),
"mode": WorkflowActionParameter(
name="mode",
diff --git a/scripts/_listMandates.py b/scripts/_listMandates.py
new file mode 100644
index 00000000..cf3e9bd2
--- /dev/null
+++ b/scripts/_listMandates.py
@@ -0,0 +1,25 @@
+import sys
+from pathlib import Path
+sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
+import psycopg2, psycopg2.extras
+from modules.shared.configuration import APP_CONFIG
+
+c = psycopg2.connect(
+ host=APP_CONFIG.get('DB_HOST','localhost'),
+ user=APP_CONFIG.get('DB_USER'),
+ password=APP_CONFIG.get('DB_PASSWORD_SECRET'),
+ port=int(APP_CONFIG.get('DB_PORT',5432)),
+ dbname='poweron_app',
+)
+cur = c.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+cur.execute('SELECT id, name, label, enabled, "deletedAt", "sysCreatedAt" FROM "Mandate" ORDER BY "sysCreatedAt"')
+print("All Mandates in poweron_app:")
+for r in cur.fetchall():
+ print(f" id={r['id']} name={r['name']} label={r['label']} enabled={r['enabled']} deletedAt={r['deletedAt']}")
+
+cur.execute('SELECT COUNT(*) AS n FROM "FeatureInstance" WHERE "featureCode" = %s', ("redmine",))
+print(f"\nTotal redmine FeatureInstances in poweron_app: {cur.fetchone()['n']}")
+
+cur.execute('SELECT id, "mandateId", label, enabled FROM "FeatureInstance" WHERE "featureCode" = %s ORDER BY "sysCreatedAt"', ("redmine",))
+for r in cur.fetchall():
+ print(f" fi={r['id']} mandate={r['mandateId']} label={r['label']} enabled={r['enabled']}")
diff --git a/scripts/check_orphan_featureinstance.py b/scripts/check_orphan_featureinstance.py
new file mode 100644
index 00000000..c09de61b
--- /dev/null
+++ b/scripts/check_orphan_featureinstance.py
@@ -0,0 +1,97 @@
+"""Quick-Check: existiert FeatureInstance-Row 6019e7d0-b23d-41ec-b9f7-3dd1293078f2
+in poweron_app, und welche Mandate/Instances stehen mit dem RedmineTicketMirror in Verbindung?
+
+Aufruf: python gateway/scripts/check_orphan_featureinstance.py
+"""
+from __future__ import annotations
+
+import sys
+from pathlib import Path
+
+_GATEWAY = Path(__file__).resolve().parents[1]
+if str(_GATEWAY) not in sys.path:
+ sys.path.insert(0, str(_GATEWAY))
+
+import psycopg2
+import psycopg2.extras
+
+from modules.shared.configuration import APP_CONFIG
+
+
+_TARGET_FI = "6019e7d0-b23d-41ec-b9f7-3dd1293078f2"
+_TARGET_MANDATE = "674b1bc0-1d01-4696-a094-3374c450f6e2"
+
+
+def _connect(dbName: str):
+ return psycopg2.connect(
+ host=APP_CONFIG.get("DB_HOST", "localhost"),
+ user=APP_CONFIG.get("DB_USER"),
+ password=APP_CONFIG.get("DB_PASSWORD_SECRET"),
+ port=int(APP_CONFIG.get("DB_PORT", 5432)),
+ dbname=dbName,
+ )
+
+
+def main() -> int:
+ print(f"Checking FeatureInstance {_TARGET_FI} ...\n")
+
+ with _connect("poweron_app") as appConn:
+ with appConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
+ cur.execute(
+ 'SELECT id, "mandateId", "featureCode", label, enabled, '
+ '"sysCreatedAt", "sysModifiedAt" '
+ 'FROM "FeatureInstance" WHERE id = %s',
+ (_TARGET_FI,),
+ )
+ fi = cur.fetchone()
+ print(f"FeatureInstance row in poweron_app: {fi}\n")
+
+ cur.execute(
+ 'SELECT id, "mandateId", "featureCode", label, enabled '
+ 'FROM "FeatureInstance" '
+ 'WHERE "mandateId" = %s AND "featureCode" = %s',
+ (_TARGET_MANDATE, "redmine"),
+ )
+ sameMandateRedmine = cur.fetchall()
+ print(
+ f"All redmine FeatureInstances on mandate {_TARGET_MANDATE} "
+ f"({len(sameMandateRedmine)}):"
+ )
+ for r in sameMandateRedmine:
+ print(f" {r}")
+ print()
+
+ cur.execute(
+ 'SELECT id, name, label, enabled, "deletedAt", '
+ '"sysCreatedAt", "sysModifiedAt" '
+ 'FROM "Mandate" WHERE id = %s',
+ (_TARGET_MANDATE,),
+ )
+ mandate = cur.fetchone()
+ print(f"Mandate row: {mandate}\n")
+
+ with _connect("poweron_redmine") as rmConn:
+ with rmConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
+ cur.execute(
+ 'SELECT COUNT(*) AS n '
+ 'FROM "RedmineTicketMirror" WHERE "featureInstanceId" = %s',
+ (_TARGET_FI,),
+ )
+ n = cur.fetchone()["n"]
+ print(f"RedmineTicketMirror rows with featureInstanceId={_TARGET_FI}: {n}")
+
+ cur.execute(
+ 'SELECT DISTINCT "featureInstanceId", "mandateId", COUNT(*) AS n '
+ 'FROM "RedmineTicketMirror" '
+ 'GROUP BY "featureInstanceId", "mandateId" ORDER BY n DESC LIMIT 20'
+ )
+ distribution = cur.fetchall()
+ print(f"\nRedmineTicketMirror distribution (top 20):")
+ for r in distribution:
+ print(f" fi={r['featureInstanceId']} mandate={r['mandateId']} count={r['n']}")
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/script_migrate_feature_instance_refs.py b/scripts/script_migrate_feature_instance_refs.py
new file mode 100644
index 00000000..40f723c1
--- /dev/null
+++ b/scripts/script_migrate_feature_instance_refs.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Persistent DB migration: rewrite raw ``featureInstanceId`` UUIDs in stored
+workflow graphs to typed ``FeatureInstanceRef`` envelopes.
+
+Why
+---
+The runtime engine (``executeGraph``) already calls
+``materializeFeatureInstanceRefs`` on every run, so legacy graphs *execute*
+correctly today. The Editor however reads the persisted ``graph`` field
+directly and shows whatever shape is on disk — until a workflow is saved
+again it still displays the old plain-string format.
+
+What this script does
+---------------------
+Walks every row of:
+
+* ``poweron_graphicaleditor.Automation2Workflow`` (legacy ``graph`` column)
+* ``poweron_graphicaleditor.AutoVersion`` (canonical ``graph`` column)
+
+For each row, it:
+
+1. Loads the JSONB ``graph`` column.
+2. Applies :func:`materializeFeatureInstanceRefs`.
+3. Persists the result if (and only if) it differs from the input.
+
+Idempotent — re-runs are no-ops.
+
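+Illustrative before/after for one node parameter (envelope shape as asserted
+by the Layer-4 integration tests; shown for a trustee node, UUID elided)::
+
+    "featureInstanceId": "<uuid>"
+    "featureInstanceId": {"$type": "FeatureInstanceRef",
+                          "id": "<uuid>", "featureCode": "trustee"}
+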
+Usage
+-----
+::
+
+ python scripts/script_migrate_feature_instance_refs.py --dry-run
+ python scripts/script_migrate_feature_instance_refs.py
+
+Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (Track C1).
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import logging
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Tuple
+
+_scriptPath = Path(__file__).resolve()
+_gatewayPath = _scriptPath.parent.parent
+sys.path.insert(0, str(_gatewayPath))
+os.chdir(str(_gatewayPath))
+
+import psycopg2 # noqa: E402
+from psycopg2.extras import Json, RealDictCursor # noqa: E402
+
+from modules.shared.configuration import APP_CONFIG # noqa: E402
+from modules.workflows.automation2.featureInstanceRefMigration import ( # noqa: E402
+ materializeFeatureInstanceRefs,
+)
+
+logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
+logger = logging.getLogger("script_migrate_feature_instance_refs")
+
+
+_DB_NAME = "poweron_graphicaleditor"
+_TABLES_AND_PK: List[Tuple[str, str]] = [
+ ('"Automation2Workflow"', "id"),
+ ('"AutoVersion"', "id"),
+]
+
+
+def _connect() -> "psycopg2.extensions.connection":
+ cfg = {
+ "host": APP_CONFIG.get("DB_HOST", "localhost"),
+ "port": int(APP_CONFIG.get("DB_PORT", "5432")),
+ "user": APP_CONFIG.get("DB_USER"),
+ "password": (
+ APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
+ ),
+ "database": _DB_NAME,
+ }
+ if not cfg["user"] or not cfg["password"]:
+ raise SystemExit("DB_USER and DB_PASSWORD/DB_PASSWORD_SECRET must be set")
+ return psycopg2.connect(**cfg)
+
+
+def _loadGraph(value: Any) -> Dict[str, Any]:
+ """psycopg2 returns JSONB as a Python dict, but legacy data may be a JSON string."""
+ if isinstance(value, dict):
+ return value
+ if isinstance(value, (bytes, bytearray)):
+ value = value.decode("utf-8", errors="replace")
+ if isinstance(value, str) and value.strip():
+ try:
+ return json.loads(value)
+ except json.JSONDecodeError:
+ return {}
+ return {}
+
+
+def _countMigrations(before: Dict[str, Any], after: Dict[str, Any]) -> int:
+ """Count how many ``featureInstanceId`` values were rewritten."""
+ if before == after:
+ return 0
+ bnodes = before.get("nodes") if isinstance(before, dict) else None
+ anodes = after.get("nodes") if isinstance(after, dict) else None
+ if not isinstance(bnodes, list) or not isinstance(anodes, list):
+ return 0
+ count = 0
+ for bn, an in zip(bnodes, anodes):
+ bp = (bn.get("parameters") or {}) if isinstance(bn, dict) else {}
+ ap = (an.get("parameters") or {}) if isinstance(an, dict) else {}
+ if bp.get("featureInstanceId") != ap.get("featureInstanceId"):
+ count += 1
+ return count
+
+
+def _migrateOneTable(
+ conn,
+ table: str,
+ pk: str,
+ *,
+ dryRun: bool,
+) -> Dict[str, int]:
+ """Process one table; returns counts dict."""
+ counts = {"scanned": 0, "rowsChanged": 0, "fieldsRewritten": 0}
+ with conn.cursor(cursor_factory=RealDictCursor) as cur:
+ cur.execute(f'SELECT {pk} AS pk, "graph" AS graph FROM {table}')
+ rows: Iterable[Dict[str, Any]] = cur.fetchall()
+ for row in rows:
+ counts["scanned"] += 1
+ before = _loadGraph(row.get("graph"))
+ if not before:
+ continue
+ after = materializeFeatureInstanceRefs(before)
+ if before == after:
+ continue
+ rewritten = _countMigrations(before, after)
+ if rewritten == 0:
+ continue
+ counts["rowsChanged"] += 1
+ counts["fieldsRewritten"] += rewritten
+ logger.info(
+ "%s id=%s: %d featureInstanceId value(s) %s",
+ table,
+ row["pk"],
+ rewritten,
+ "would be migrated [dry-run]" if dryRun else "migrated",
+ )
+ if not dryRun:
+ with conn.cursor() as updCur:
+ updCur.execute(
+ f'UPDATE {table} SET "graph" = %s WHERE {pk} = %s',
+ (Json(after), row["pk"]),
+ )
+ if not dryRun:
+ conn.commit()
+ return counts
+
+
+def migrate(dryRun: bool = False) -> Dict[str, Dict[str, int]]:
+ """Walk all tracked tables and migrate. Returns per-table counts."""
+ summary: Dict[str, Dict[str, int]] = {}
+ conn = _connect()
+ try:
+ for table, pk in _TABLES_AND_PK:
+ summary[table] = _migrateOneTable(conn, table, pk, dryRun=dryRun)
+ finally:
+ conn.close()
+ return summary
+
+
+def main() -> int:
+ parser = argparse.ArgumentParser(
+ description="Persist materializeFeatureInstanceRefs into stored workflow graphs."
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Report what would be migrated without writing back.",
+ )
+ args = parser.parse_args()
+
+ logger.info(
+ "Starting featureInstanceRef DB migration (dry-run=%s, db=%s)",
+ args.dry_run,
+ _DB_NAME,
+ )
+ summary = migrate(dryRun=args.dry_run)
+ totalRows = sum(s["rowsChanged"] for s in summary.values())
+ totalFields = sum(s["fieldsRewritten"] for s in summary.values())
+ for table, counts in summary.items():
+ logger.info(
+ "%s: scanned=%d rowsChanged=%d fieldsRewritten=%d",
+ table,
+ counts["scanned"],
+ counts["rowsChanged"],
+ counts["fieldsRewritten"],
+ )
+ logger.info(
+ "%s: %d row(s) %s, %d featureInstanceId value(s) total.",
+ "Dry-run summary" if args.dry_run else "Migration summary",
+ totalRows,
+ "would be updated" if args.dry_run else "updated",
+ totalFields,
+ )
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tests/integration/automation2/__init__.py b/tests/integration/automation2/__init__.py
new file mode 100644
index 00000000..d30846a4
--- /dev/null
+++ b/tests/integration/automation2/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) 2025 Patrick Motsch
+"""Integration tests for automation2 typed bindings (Phase-5 Schicht-4)."""
diff --git a/tests/integration/automation2/test_pick_not_push_migration_v2.py b/tests/integration/automation2/test_pick_not_push_migration_v2.py
new file mode 100644
index 00000000..9b98e0ec
--- /dev/null
+++ b/tests/integration/automation2/test_pick_not_push_migration_v2.py
@@ -0,0 +1,189 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Phase-5 Layer-4 integration test (T11): the typed-bindings pipeline must
+produce identical action-call parameters whether a workflow stores
+``featureInstanceId`` as a legacy raw UUID or as a typed
+``FeatureInstanceRef`` envelope.
+
+The pipeline under test::
+
+ saved graph
+ -> materializeFeatureInstanceRefs (Phase-5, this test)
+ -> materializeConnectionRefs (existing pick-not-push helper)
+ -> resolveParameterReferences (typed bindings + envelope unwrap)
+ -> action params (what the action implementation would receive)
+
+This is the integration counterpart to the focused unit tests in
+``tests/unit/workflows/test_featureInstanceRefMigration.py``.
+
+Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``.
+"""
+from __future__ import annotations
+
+import copy
+from typing import Any, Dict
+
+import pytest
+
+from modules.workflows.automation2.featureInstanceRefMigration import (
+ materializeFeatureInstanceRefs,
+)
+from modules.workflows.automation2.graphUtils import resolveParameterReferences
+from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs
+
+
+_TRUSTEE_INSTANCE_UUID = "f1e2d3c4-b5a6-7890-1234-567890abcdef"
+
+
+def _resolveActionParams(graph: Dict[str, Any], nodeId: str) -> Dict[str, Any]:
+ """Apply the full Schicht-4 pipeline and return the resolved action params
+ that ``ActionNodeExecutor`` would forward to ``ActionExecutor.executeAction``."""
+ g = materializeFeatureInstanceRefs(graph)
+ g = materializeConnectionRefs(g)
+ targetNode = next(n for n in g["nodes"] if n["id"] == nodeId)
+ rawParams = dict(targetNode.get("parameters") or {})
+ return resolveParameterReferences(rawParams, nodeOutputs={})
+
+
+def _legacyTrusteeGraph() -> Dict[str, Any]:
+ """Trustee Spesenbelege-shape graph with raw UUIDs (pre-migration)."""
+ return {
+ "nodes": [
+ {"id": "n1", "type": "trigger.manual", "parameters": {}},
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {
+ "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
+ "prompt": "extract expenses",
+ },
+ },
+ {
+ "id": "n6",
+ "type": "trustee.processDocuments",
+ "parameters": {
+ "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
+ "documentList": {
+ "type": "ref",
+ "nodeId": "n5",
+ "path": ["documents"],
+ },
+ },
+ },
+ {
+ "id": "n7",
+ "type": "trustee.syncToAccounting",
+ "parameters": {
+ "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
+ "documentList": {
+ "type": "ref",
+ "nodeId": "n6",
+ "path": ["documents"],
+ },
+ },
+ },
+ ],
+ "connections": [
+ {"source": "n1", "target": "n5"},
+ {"source": "n5", "target": "n6"},
+ {"source": "n6", "target": "n7"},
+ ],
+ }
+
+
+def _migratedTrusteeGraph() -> Dict[str, Any]:
+ """The same graph but already in the migrated (typed envelope) shape."""
+ g = _legacyTrusteeGraph()
+ envelope = {
+ "$type": "FeatureInstanceRef",
+ "id": _TRUSTEE_INSTANCE_UUID,
+ "featureCode": "trustee",
+ }
+ for node in g["nodes"]:
+ if node.get("type", "").startswith("trustee."):
+ node["parameters"]["featureInstanceId"] = copy.deepcopy(envelope)
+ return g
+
+
+# ---------------------------------------------------------------------------
+# Round-trip: legacy + migrated graphs produce identical action params
+# ---------------------------------------------------------------------------
+
+
+class TestTrusteeBindingsPipeline:
+ @pytest.mark.parametrize("nodeId", ["n5", "n6", "n7"])
+ def test_legacyAndMigratedGraphsResolveToSameFeatureInstanceId(self, nodeId):
+ legacyParams = _resolveActionParams(_legacyTrusteeGraph(), nodeId)
+ migratedParams = _resolveActionParams(_migratedTrusteeGraph(), nodeId)
+ assert legacyParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
+ assert migratedParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
+ assert legacyParams == migratedParams
+
+ def test_legacyGraphIsConvertedToTypedEnvelopeInPlaceOfRawUuid(self):
+ legacy = _legacyTrusteeGraph()
+ migrated = materializeFeatureInstanceRefs(legacy)
+ for node in migrated["nodes"]:
+ if not node.get("type", "").startswith("trustee."):
+ continue
+ param = node["parameters"]["featureInstanceId"]
+ assert isinstance(param, dict), f"node {node['id']} not migrated"
+ assert param["$type"] == "FeatureInstanceRef"
+ assert param["id"] == _TRUSTEE_INSTANCE_UUID
+ assert param["featureCode"] == "trustee"
+
+ def test_migrationIsIdempotentAcrossPipeline(self):
+ once = materializeFeatureInstanceRefs(_legacyTrusteeGraph())
+ twice = materializeFeatureInstanceRefs(once)
+ assert once == twice
+
+ def test_otherParamsArePreservedAcrossMigration(self):
+ legacy = _legacyTrusteeGraph()
+ migrated = materializeFeatureInstanceRefs(legacy)
+ n5 = next(n for n in migrated["nodes"] if n["id"] == "n5")
+ assert n5["parameters"]["prompt"] == "extract expenses"
+ n6 = next(n for n in migrated["nodes"] if n["id"] == "n6")
+ # documentList DataRef must survive untouched (only the
+ # featureInstanceId key is rewritten).
+ assert n6["parameters"]["documentList"] == {
+ "type": "ref",
+ "nodeId": "n5",
+ "path": ["documents"],
+ }
+
+
+# ---------------------------------------------------------------------------
+# Cross-feature: same migration handles redmine / clickup / sharepoint
+# ---------------------------------------------------------------------------
+
+
+class TestCrossFeatureMigration:
+ @pytest.mark.parametrize(
+ "nodeType,expectedCode",
+ [
+ ("redmine.createIssue", "redmine"),
+ ("clickup.createTask", "clickup"),
+ ("sharepoint.listFiles", "sharepoint"),
+ ],
+ )
+ def test_nonTrusteeNodesAreMigratedWithCorrectFeatureCode(
+ self, nodeType, expectedCode
+ ):
+ graph = {
+ "nodes": [
+ {
+ "id": "n",
+ "type": nodeType,
+ "parameters": {"featureInstanceId": "uuid-x"},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ env = out["nodes"][0]["parameters"]["featureInstanceId"]
+ assert env == {
+ "$type": "FeatureInstanceRef",
+ "id": "uuid-x",
+ "featureCode": expectedCode,
+ }
+ # And the resolver still hands back the raw UUID for legacy actions.
+ resolved = resolveParameterReferences(env, nodeOutputs={})
+ assert resolved == "uuid-x"
diff --git a/tests/integration/trustee/__init__.py b/tests/integration/trustee/__init__.py
new file mode 100644
index 00000000..d02d6efc
--- /dev/null
+++ b/tests/integration/trustee/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+#
+# Trustee feature integration tests.
diff --git a/tests/integration/trustee/test_spesenbelege_workflow_e2e.py b/tests/integration/trustee/test_spesenbelege_workflow_e2e.py
new file mode 100644
index 00000000..a1143063
--- /dev/null
+++ b/tests/integration/trustee/test_spesenbelege_workflow_e2e.py
@@ -0,0 +1,474 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Plan #2 Track A2 (T4): Trustee Spesenbelege live end-to-end integration test.
+
+Runs the canonical Trustee Spesenbelege chain end-to-end through
+``executeGraph``::
+
+ trigger.manual
+ -> trustee.processDocuments (real action)
+ -> trustee.syncToAccounting (real action)
+
+with:
+
+* an in-memory **TrusteeInterface** fake (records createDocument /
+ createPosition / updatePosition calls and assigns deterministic IDs),
+* an in-memory **AccountingBridge** fake (records pushBatchToAccounting
+ calls and returns one success result per positionId),
+* a literal upstream ``documentList`` (no AI / SharePoint involved — the
+ extraction step is replaced by a canned ActionDocument list so this
+ test focuses on the bindings + action layer, exactly as the Track A2
+ plan requires: "Mock SharePoint + AI + Trustee-DB, echtes
+ processDocuments + syncToAccounting").
+
+The test exercises the **Schicht-4 typed bindings pipeline** end-to-end:
+
+* ``featureInstanceId`` is provided as a typed ``FeatureInstanceRef``
+ envelope on the producer node and as a raw legacy UUID on the consumer
+ node — both must reach the action layer as the bare UUID string after
+ ``materializeFeatureInstanceRefs`` + ``resolveParameterReferences``.
+* ``documentList`` on ``trustee.syncToAccounting`` is a ``DataRef`` on
+ ``processDocuments[documents]`` (Pick-not-Push) — must resolve to the
+ ActionDocument list produced by ``processDocuments``.
+
+Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (A2 / T4).
+"""
+from __future__ import annotations
+
+import copy
+import json
+import uuid
+from typing import Any, Dict, List, Optional
+
+import pytest
+
+from modules.workflows.automation2.executionEngine import executeGraph
+from modules.workflows.automation2.runEnvelope import default_run_envelope
+
+
+_TRUSTEE_INSTANCE_UUID = "11111111-2222-3333-4444-555555555555"
+_MANDATE_ID = "mandate-zh-001"
+
+
+# ---------------------------------------------------------------------------
+# In-memory fakes for the Trustee feature
+# ---------------------------------------------------------------------------
+
+
+class _FakeTrusteeDocument:
+ """Minimal stand-in for ``TrusteeDocument`` with attribute access."""
+
+ def __init__(self, payload: Dict[str, Any]):
+ self.id = str(uuid.uuid4())
+ for k, v in payload.items():
+ setattr(self, k, v)
+
+ def model_dump(self) -> Dict[str, Any]:
+ return {k: v for k, v in self.__dict__.items()}
+
+
+class _FakeTrusteePosition:
+ """Minimal stand-in for ``TrusteePosition`` with attribute access."""
+
+ def __init__(self, payload: Dict[str, Any]):
+ self.id = str(uuid.uuid4())
+ for k, v in payload.items():
+ setattr(self, k, v)
+
+ def model_dump(self) -> Dict[str, Any]:
+ return {k: v for k, v in self.__dict__.items()}
+
+
+class _FakeTrusteeDb:
+ """Captures ``getRecordset`` calls so processDocuments' bank-match
+ auto-linking path can be exercised without a real DB."""
+
+ def __init__(self, positions: List[_FakeTrusteePosition]):
+ self._positions = positions
+ self.calls: List[Dict[str, Any]] = []
+
+ def getRecordset(self, model, recordFilter=None):
+ self.calls.append({"model": getattr(model, "__name__", str(model)),
+ "filter": recordFilter})
+ return list(self._positions)
+
+
+class _FakeTrusteeInterface:
+ """In-memory replacement for the live trustee interface."""
+
+ def __init__(self, mandateId: str, featureInstanceId: str):
+ self.mandateId = mandateId
+ self.featureInstanceId = featureInstanceId
+ self.documents: List[_FakeTrusteeDocument] = []
+ self.positions: List[_FakeTrusteePosition] = []
+ self.updates: List[Dict[str, Any]] = []
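+        # Pass the positions list by reference so positions created later
+        # are visible to getRecordset (the bank-match auto-link path in
+        # processDocuments relies on this).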
+ self.db = _FakeTrusteeDb(self.positions)
+
+ def createDocument(self, payload: Dict[str, Any]) -> _FakeTrusteeDocument:
+ doc = _FakeTrusteeDocument({
+ "fileId": payload.get("fileId"),
+ "documentName": payload.get("documentName"),
+ "documentMimeType": payload.get("documentMimeType"),
+ "sourceType": payload.get("sourceType"),
+ "documentType": payload.get("documentType"),
+ "mandateId": self.mandateId,
+ "featureInstanceId": self.featureInstanceId,
+ })
+ self.documents.append(doc)
+ return doc
+
+ def createPosition(self, payload: Dict[str, Any]) -> _FakeTrusteePosition:
+ pos = _FakeTrusteePosition({**payload})
+ self.positions.append(pos)
+ return pos
+
+ def updatePosition(self, positionId: str, patch: Dict[str, Any]) -> Optional[_FakeTrusteePosition]:
+ self.updates.append({"id": positionId, "patch": dict(patch)})
+ for pos in self.positions:
+ if getattr(pos, "id", None) == positionId:
+ for k, v in patch.items():
+ setattr(pos, k, v)
+ return pos
+ return None
+
+
+class _FakeAccountingResult:
+ def __init__(self, success: bool = True, errorMessage: Optional[str] = None):
+ self.success = success
+ self.errorMessage = errorMessage
+
+
+class _FakeAccountingBridge:
+ """Records pushBatchToAccounting invocations and returns one success
+ per positionId."""
+
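+    # Class-level on purpose: the action code instantiates the (patched)
+    # bridge class itself, so calls are recorded on the type and cleared
+    # by the autouse resetAccountingBridgeCalls fixture below.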
+ pushBatchCalls: List[Dict[str, Any]] = []
+
+ def __init__(self, trusteeInterface):
+ self.trusteeInterface = trusteeInterface
+
+ async def pushBatchToAccounting(self, featureInstanceId: str,
+ positionIds: List[str]):
+ type(self).pushBatchCalls.append({
+ "featureInstanceId": featureInstanceId,
+ "positionIds": list(positionIds),
+ })
+ return [_FakeAccountingResult(success=True) for _ in positionIds]
+
+
+# ---------------------------------------------------------------------------
+# Test fixtures: mock services + module-level patches
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture
+def trusteeInterface():
+ return _FakeTrusteeInterface(_MANDATE_ID, _TRUSTEE_INSTANCE_UUID)
+
+
+@pytest.fixture(autouse=True)
+def resetAccountingBridgeCalls():
+ _FakeAccountingBridge.pushBatchCalls = []
+ yield
+ _FakeAccountingBridge.pushBatchCalls = []
+
+
+@pytest.fixture
+def patchTrustee(monkeypatch, trusteeInterface):
+ """Patches ``getInterface`` + ``AccountingBridge`` in both action
+ modules so the real action code runs against the in-memory fakes."""
+ from modules.workflows.methods.methodTrustee.actions import (
+ processDocuments as _procMod,
+ syncToAccounting as _syncMod,
+ )
+ from modules.features.trustee import (
+ interfaceFeatureTrustee as _ifaceMod,
+ )
+ from modules.features.trustee.accounting import accountingBridge as _bridgeMod
+
+ def _fakeGetInterface(*_args, **_kwargs):
+ return trusteeInterface
+
+ monkeypatch.setattr(_ifaceMod, "getInterface", _fakeGetInterface, raising=True)
+ monkeypatch.setattr(_bridgeMod, "AccountingBridge", _FakeAccountingBridge, raising=True)
+ return trusteeInterface
+
+
+def _services():
+ """Minimal services container for executeGraph.
+
+ The ``ActionExecutor`` only needs ``services`` to be passed through to
+ the trustee actions. The trustee actions only touch
+ ``services.mandateId`` and ``services.featureInstanceId`` directly
+ (everything else is provided via ``parameters``); ``services.chat`` is
+ looked up but only used as a fallback that we do not exercise here.
+ """
+ class _S:
+ mandateId = _MANDATE_ID
+ featureInstanceId = _TRUSTEE_INSTANCE_UUID
+ user = None
+ chat = None
+ return _S()
+
+
+# ---------------------------------------------------------------------------
+# Canned upstream extraction result
+# ---------------------------------------------------------------------------
+
+
+def _expenseReceiptExtraction() -> Dict[str, Any]:
+ return {
+ "documentType": "EXPENSE_RECEIPT",
+ "fileId": "file-001",
+ "fileName": "tankbeleg.pdf",
+ "extractedData": [
+ {
+ "documentType": "expense_receipt",
+ "valuta": "2026-04-12",
+ "transactionDateTime": 1744675200,
+ "company": "Migrolino Tankstelle Zürich AG",
+ "desc": "Tankfüllung Bleifrei 95, 42.30 L à 1.799 CHF/L",
+ "bookingCurrency": "CHF",
+ "bookingAmount": "76.10",
+ "originalCurrency": "CHF",
+ "originalAmount": "76.10",
+ "vatPercentage": "8.1",
+ "vatAmount": "5.71",
+ "debitAccountNumber": "6200 Fahrzeugaufwand",
+ "creditAccountNumber": "1020 Bank",
+ "tags": ["fuel", "vehicle"],
+ "bookingReference": "RB-2026-04-12-001",
+ }
+ ],
+ }
+
+
+def _bankDocumentExtraction() -> Dict[str, Any]:
+ return {
+ "documentType": "BANK_DOCUMENT",
+ "fileId": "file-002",
+ "fileName": "kontoauszug_april.pdf",
+ "extractedData": [
+ {
+ "documentType": "bank_document",
+ "valuta": "2026-04-13",
+ "company": "Migrolino Tankstelle Zürich AG",
+ "desc": "Lastschrift Tankfüllung 12.04.2026, Ref RB-2026-04-12-001",
+ "bookingCurrency": "CHF",
+ "bookingAmount": "-76.10",
+ "creditAccountNumber": "1020 Bank",
+ "bookingReference": "RB-2026-04-12-001",
+ }
+ ],
+ }
+
+
+def _cannedExtractionDocuments() -> List[Dict[str, Any]]:
+ """Two ActionDocument-shaped dicts: one expense receipt + one bank
+ document. processDocuments' ``_resolveDocumentList`` accepts this
+ shape directly when ``documentName`` / ``documentData`` are present."""
+ return [
+ {
+ "documentName": "tankbeleg.json",
+ "documentData": json.dumps(_expenseReceiptExtraction()),
+ "mimeType": "application/json",
+ },
+ {
+ "documentName": "kontoauszug_april.json",
+ "documentData": json.dumps(_bankDocumentExtraction()),
+ "mimeType": "application/json",
+ },
+ ]
+
+
+# ---------------------------------------------------------------------------
+# Graph builder
+# ---------------------------------------------------------------------------
+
+
+def _buildGraph(featureInstanceIdOnProcess, featureInstanceIdOnSync) -> Dict[str, Any]:
+ """Trustee Spesenbelege chain.
+
+ The ``trigger.manual`` node emits an ``ActionResult`` port, which is
+ not assignable into ``trustee.processDocuments[in:0]`` (accepts only
+ ``DocumentList`` / ``Transit``). Production graphs solve this by
+ going through ``trustee.extractFromFiles`` (DocumentList output)
+ first; this test bypasses that step (we ship a literal canned
+ extraction list instead of running AI/SharePoint), so we simply
+ leave ``trigger.manual`` orphaned and start the data plane at
+ ``process``."""
+ return {
+ "nodes": [
+ {"id": "trigger", "type": "trigger.manual", "parameters": {}},
+ {
+ "id": "process",
+ "type": "trustee.processDocuments",
+ "parameters": {
+ "featureInstanceId": featureInstanceIdOnProcess,
+ "documentList": _cannedExtractionDocuments(),
+ },
+ },
+ {
+ "id": "sync",
+ "type": "trustee.syncToAccounting",
+ "parameters": {
+ "featureInstanceId": featureInstanceIdOnSync,
+ "documentList": {
+ "type": "ref",
+ "nodeId": "process",
+ "path": ["documents"],
+ },
+ },
+ },
+ ],
+ "connections": [
+ {"source": "process", "target": "sync"},
+ ],
+ }
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestSpesenbelegeEndToEnd:
+ """End-to-end Trustee Spesenbelege graph through executeGraph."""
+
+ @pytest.mark.asyncio
+ async def test_processAndSyncWritesDocumentsPositionsAndAccountingPush(
+ self, patchTrustee
+ ):
+ """Happy-path: 1 expense receipt + 1 bank document.
+
+ Asserts at all three layers: bindings, action results, and side
+ effects on the (faked) trustee + accounting infrastructure."""
+ trustee = patchTrustee
+ envelope = {
+ "$type": "FeatureInstanceRef",
+ "id": _TRUSTEE_INSTANCE_UUID,
+ "featureCode": "trustee",
+ }
+ graph = _buildGraph(
+ featureInstanceIdOnProcess=copy.deepcopy(envelope),
+ featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
+ )
+ runEnvelope = default_run_envelope("manual", payload={})
+
+ result = await executeGraph(
+ graph,
+ services=_services(),
+ run_envelope=runEnvelope,
+ userId="test-user",
+ mandateId=_MANDATE_ID,
+ instanceId=_TRUSTEE_INSTANCE_UUID,
+ )
+
+ assert result.get("success") is True, result
+
+ # --- Layer 1: bindings — both nodes must see the unwrapped UUID ---
+ assert len(trustee.documents) == 2
+ for doc in trustee.documents:
+ assert doc.featureInstanceId == _TRUSTEE_INSTANCE_UUID
+
+ # --- Layer 2: action results -----------------------------------
+ nodeOutputs = result["nodeOutputs"]
+ processOut = nodeOutputs["process"]
+ assert processOut.get("success") is True
+ assert processOut.get("error") in (None, "", False)
+ assert isinstance(processOut.get("documents"), list)
+ assert len(processOut["documents"]) == 1
+ processedDoc = processOut["documents"][0]
+ assert processedDoc.get("documentName") == "process_documents_result.json"
+ payload = json.loads(processedDoc["documentData"])
+ assert len(payload["documentIds"]) == 2
+ assert len(payload["positionIds"]) == 2
+ # Bank document auto-link found the matching expense (same
+ # bookingReference RB-2026-04-12-001), so exactly one position
+ # was matched.
+ assert len(payload["autoMatchedPositionIds"]) == 1
+
+ syncOut = nodeOutputs["sync"]
+ assert syncOut.get("success") is True
+ assert syncOut.get("error") in (None, "", False)
+ syncDoc = syncOut["documents"][0]
+ syncSummary = json.loads(syncDoc["documentData"])
+ assert syncSummary["pushed"] == 2
+ assert syncSummary["total"] == 2
+ assert all(r["success"] is True for r in syncSummary["results"])
+
+ # --- Layer 3: side effects -------------------------------------
+ assert len(trustee.positions) == 2
+ # Bank document update propagated through updatePosition
+ assert len(trustee.updates) == 1
+ assert "bankDocumentId" in trustee.updates[0]["patch"]
+
+ # Accounting bridge was called once with the resolved positionIds
+ # and the unwrapped UUID, NOT the typed envelope.
+ assert len(_FakeAccountingBridge.pushBatchCalls) == 1
+ call = _FakeAccountingBridge.pushBatchCalls[0]
+ assert call["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
+ assert sorted(call["positionIds"]) == sorted(payload["positionIds"])
+
+ @pytest.mark.asyncio
+ async def test_legacyRawUuidFeatureInstanceIdAlsoWorks(self, patchTrustee):
+ """A pre-Schicht-4 graph storing ``featureInstanceId`` as a raw
+ UUID must produce the same end-to-end behaviour after the
+ runtime ``materializeFeatureInstanceRefs`` migration."""
+ trustee = patchTrustee
+ graph = _buildGraph(
+ featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID,
+ featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
+ )
+ result = await executeGraph(
+ graph,
+ services=_services(),
+ run_envelope=default_run_envelope("manual", payload={}),
+ userId="test-user",
+ mandateId=_MANDATE_ID,
+ instanceId=_TRUSTEE_INSTANCE_UUID,
+ )
+ assert result.get("success") is True, result
+ assert len(trustee.documents) == 2
+ assert len(trustee.positions) == 2
+ assert _FakeAccountingBridge.pushBatchCalls[0]["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
+
+ @pytest.mark.asyncio
+ async def test_emptyExtractionListIsHandledGracefully(self, patchTrustee):
+ """When processDocuments receives no documents, syncToAccounting
+ must surface a graceful "No positionIds in document" message and
+ never call the accounting bridge."""
+ trustee = patchTrustee
+ graph = _buildGraph(
+ featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID,
+ featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
+ )
+ # Replace the canned documents with a no-records extraction.
+ emptyExtraction = {
+ "documentType": "EXPENSE_RECEIPT",
+ "fileId": "file-empty",
+ "fileName": "empty.json",
+ "extractedData": [],
+ }
+ graph["nodes"][1]["parameters"]["documentList"] = [{
+ "documentName": "empty.json",
+ "documentData": json.dumps(emptyExtraction),
+ "mimeType": "application/json",
+ }]
+ result = await executeGraph(
+ graph,
+ services=_services(),
+ run_envelope=default_run_envelope("manual", payload={}),
+ userId="test-user",
+ mandateId=_MANDATE_ID,
+ instanceId=_TRUSTEE_INSTANCE_UUID,
+ )
+ assert result.get("success") is True, result
+ assert len(trustee.documents) == 0
+ assert len(trustee.positions) == 0
+ syncSummary = json.loads(
+ result["nodeOutputs"]["sync"]["documents"][0]["documentData"]
+ )
+ assert syncSummary["pushed"] == 0
+ assert _FakeAccountingBridge.pushBatchCalls == []
diff --git a/tests/unit/graphicalEditor/test_action_node_connection_provenance.py b/tests/unit/graphicalEditor/test_action_node_connection_provenance.py
new file mode 100644
index 00000000..b04dd594
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_action_node_connection_provenance.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2025 Patrick Motsch
+from modules.workflows.automation2.executors.actionNodeExecutor import _buildConnectionRefDict
+
+
+def test_build_connection_ref_dict_from_logical_string():
+ d = _buildConnectionRefDict("connection:msft:user@example.com", None, None)
+ assert d is not None
+ assert d["authority"] == "msft"
+ assert d["label"] == "connection:msft:user@example.com"
diff --git a/tests/unit/graphicalEditor/test_adapter_validator.py b/tests/unit/graphicalEditor/test_adapter_validator.py
new file mode 100644
index 00000000..5f8091fd
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_adapter_validator.py
@@ -0,0 +1,352 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Tests for the Schicht-3 Adapter Validator (Phase 3).
+
+Validates the 5 drift rules between Editor-Node Adapters and the
+Schicht-2 Actions they bind to:
+
+ Rule 1: every userParams.actionArg exists in the Action
+ Rule 2: every required Action arg is covered (userParams or contextParams)
+ Rule 3: every Action parameter type exists in PORT_TYPE_CATALOG
+ Rule 4: Action outputType exists in PORT_TYPE_CATALOG
+ Rule 5: every Action with dynamicMode=False has an Editor adapter
+
+Plus a healthy-state test that runs the validator against the live
+STATIC_NODE_TYPES + every shipping Method instance, and asserts no drift.
+"""
+from __future__ import annotations
+
+import importlib
+import sys
+import types
+
+import pytest
+
+from modules.datamodels.datamodelWorkflowActions import (
+ WorkflowActionDefinition,
+ WorkflowActionParameter,
+)
+from modules.features.graphicalEditor.adapterValidator import (
+ AdapterValidationReport,
+ _buildActionsRegistryFromMethods,
+ _formatAdapterReport,
+ _validateAdapterAgainstAction,
+ _validateAllAdapters,
+)
+from modules.features.graphicalEditor.nodeAdapter import (
+ NodeAdapter,
+ UserParamMapping,
+)
+from modules.shared.frontendTypes import FrontendType
+
+
+# ---------------------------------------------------------------------------
+# Test factories
+# ---------------------------------------------------------------------------
+
+def _makeParam(typeStr: str, *, required: bool = False, **kwargs) -> WorkflowActionParameter:
+ defaults = {
+ "name": "p",
+ "type": typeStr,
+ "frontendType": FrontendType.TEXT,
+ "required": required,
+ "description": "",
+ }
+ defaults.update(kwargs)
+ return WorkflowActionParameter(**defaults)
+
+
+def _makeAction(
+ actionId: str = "trustee.processDocuments",
+ parameters: dict | None = None,
+ outputType: str = "TrusteeProcessResult",
+ dynamicMode: bool = False,
+) -> WorkflowActionDefinition:
+ return WorkflowActionDefinition(
+ actionId=actionId,
+ description="t",
+ parameters=parameters or {},
+ outputType=outputType,
+ dynamicMode=dynamicMode,
+ execute=lambda *a, **k: None,
+ )
+
+
+def _makeAdapter(
+ *,
+ userArgs: list[str] | None = None,
+ contextArgs: list[str] | None = None,
+) -> NodeAdapter:
+ return NodeAdapter(
+ nodeId="trustee.processDocuments",
+ bindsAction="trustee.processDocuments",
+ category="trustee",
+ label="Verarbeiten",
+ description="...",
+ userParams=[UserParamMapping(actionArg=a) for a in (userArgs or [])],
+ contextParams={k: f"$session.{k}" for k in (contextArgs or [])},
+ )
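+
+
+# Hedged sketch: composing the factories above by hand outside pytest. The
+# helper name is ours (not production API) and carries no test_ prefix, so
+# pytest ignores it; the rule-1 test below asserts the same drift.
+def _exampleRule1Drift() -> list:
+    action = _makeAction(parameters={
+        "documentList": _makeParam("DocumentList", required=True),
+    })
+    adapter = _makeAdapter(userArgs=["documentList", "ghostArg"])
+    return _validateAdapterAgainstAction(adapter, action).errors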
+
+
+# ---------------------------------------------------------------------------
+# Per-rule unit tests
+# ---------------------------------------------------------------------------
+
+class TestRule1_UserParamArgExistsInAction:
+ def test_okWhenAllArgsExist(self):
+ action = _makeAction(parameters={
+ "documentList": _makeParam("DocumentList", required=True),
+ "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
+ })
+ adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId"])
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert report.isHealthy, report.errors
+
+ def test_failsWhenAdapterReferencesUnknownArg(self):
+ action = _makeAction(parameters={"documentList": _makeParam("DocumentList", required=True),
+ "featureInstanceId": _makeParam("FeatureInstanceRef", required=True)})
+ adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId", "ghostArg"])
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert any("ghostArg" in e for e in report.errors)
+
+
+class TestRule2_RequiredArgsCovered:
+ def test_failsWhenRequiredArgMissing(self):
+ action = _makeAction(parameters={
+ "documentList": _makeParam("DocumentList", required=True),
+ "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
+ })
+ adapter = _makeAdapter(userArgs=["documentList"]) # missing featureInstanceId
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert any("featureInstanceId" in e for e in report.errors)
+
+ def test_okWhenRequiredArgInContext(self):
+ action = _makeAction(parameters={
+ "documentList": _makeParam("DocumentList", required=True),
+ "mandateId": _makeParam("str", required=True),
+ })
+ adapter = _makeAdapter(userArgs=["documentList"], contextArgs=["mandateId"])
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert report.isHealthy, report.errors
+
+ def test_optionalArgMayBeUnset(self):
+ action = _makeAction(parameters={
+ "documentList": _makeParam("DocumentList", required=True),
+ "prompt": _makeParam("str", required=False),
+ })
+ adapter = _makeAdapter(userArgs=["documentList"])
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert report.isHealthy, report.errors
+
+
+class TestRule3_ActionParamTypesInCatalog:
+ def test_failsForUnknownType(self):
+ action = _makeAction(parameters={"documentList": _makeParam("Foobar", required=True)})
+ adapter = _makeAdapter(userArgs=["documentList"])
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert any("Foobar" in e for e in report.errors)
+
+
+class TestRule4_OutputTypeInCatalog:
+ def test_failsForUnknownOutputType(self):
+ action = _makeAction(outputType="Nonsense")
+ adapter = _makeAdapter()
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert any("Nonsense" in e for e in report.errors)
+
+ def test_okForActionResult(self):
+ action = _makeAction(outputType="ActionResult")
+ adapter = _makeAdapter()
+ report = _validateAdapterAgainstAction(adapter, action)
+ assert report.isHealthy, report.errors
+
+
+class TestRule5_OrphanActionsAcrossRegistry:
+ def test_warnsForActionWithoutAdapter(self):
+ action = _makeAction(actionId="trustee.queryData")
+ registry = {"trustee": {"queryData": action}}
+ report = _validateAllAdapters([], registry)
+ assert any("trustee.queryData" in w for w in report.warnings)
+
+ def test_dynamicModeActionDoesNotWarn(self):
+ action = _makeAction(actionId="trustee.queryData", dynamicMode=True)
+ registry = {"trustee": {"queryData": action}}
+ report = _validateAllAdapters([], registry)
+ assert report.warnings == []
+
+
+# ---------------------------------------------------------------------------
+# Aggregator + report formatter
+# ---------------------------------------------------------------------------
+
+class TestValidateAllAdapters:
+ def test_passesWithFullCoverage(self):
+ node = {
+ "id": "trustee.processDocuments",
+ "category": "trustee",
+ "label": "X", "description": "Y",
+ "parameters": [
+ {"name": "documentList", "type": "DocumentList"},
+ {"name": "featureInstanceId", "type": "FeatureInstanceRef"},
+ ],
+ "inputs": 1, "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList"]}},
+ "_method": "trustee", "_action": "processDocuments",
+ }
+ action = _makeAction(parameters={
+ "documentList": _makeParam("DocumentList", required=True),
+ "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
+ })
+ registry = {"trustee": {"processDocuments": action}}
+ report = _validateAllAdapters([node], registry)
+ assert report.isHealthy, report.errors
+
+ def test_reportsMissingAction(self):
+ node = {
+ "id": "trustee.processDocuments",
+ "_method": "trustee", "_action": "ghostAction",
+ "parameters": [], "inputs": 0,
+ }
+ report = _validateAllAdapters([node], {"trustee": {}})
+ assert any("ghostAction" in e for e in report.errors)
+
+
+class TestFormatReport:
+ def test_healthy(self):
+ out = _formatAdapterReport(AdapterValidationReport())
+ assert "healthy" in out.lower()
+
+ def test_withErrorsAndWarnings(self):
+ rep = AdapterValidationReport(errors=["e1"], warnings=["w1"])
+ out = _formatAdapterReport(rep)
+ assert "ERROR" in out and "WARN" in out
+
+
+# ---------------------------------------------------------------------------
+# Healthy-state: live methods + STATIC_NODE_TYPES
+# ---------------------------------------------------------------------------
+
+class _NullRbac:
+ def getUserPermissions(self, **kwargs):
+ class _P:
+ view = read = create = update = delete = True
+ return _P()
+
+
+class _StubServices:
+ def __init__(self):
+ self.rbac = _NullRbac()
+ self.user = type("U", (), {"id": "test-user", "roleLabels": []})()
+ self.mandateId = None
+ self.featureInstanceId = None
+
+
+def _ensureOptionalDeps():
+ class _AnyAttrModule(types.ModuleType):
+ def __getattr__(self, name):
+ return type(name, (), {})
+
+ for name in ("aiohttp",):
+ if name not in sys.modules:
+ sys.modules[name] = _AnyAttrModule(name)
+
+
+_LIVE_METHODS = [
+ ("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee", "trustee"),
+ ("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine", "redmine"),
+ ("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint", "sharepoint"),
+ ("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook", "outlook"),
+ ("modules.workflows.methods.methodAi.methodAi", "MethodAi", "ai"),
+ ("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup", "clickup"),
+ ("modules.workflows.methods.methodFile.methodFile", "MethodFile", "file"),
+ ("modules.workflows.methods.methodContext.methodContext", "MethodContext", "context"),
+]
+
+
+def _instantiateLiveMethods() -> dict:
+ """Best-effort instantiation of every shipping Method with stub services.
+
+ Returns {shortName: instance}. Methods that can't be instantiated in the
+ test env (missing dependencies) are skipped silently — Phase 2 has its
+ own healthy-state test that catches per-method drift.
+ """
+ _ensureOptionalDeps()
+ out: dict = {}
+ for modulePath, className, shortName in _LIVE_METHODS:
+ try:
+ module = importlib.import_module(modulePath)
+ cls = getattr(module, className, None)
+ if cls is None:
+ continue
+ instance = cls(_StubServices())
+ out[shortName] = instance
+ except Exception:
+ continue
+ return out
+
+
+# Snapshot of pre-Phase-3 drift discovered when the validator was first run
+# against the live STATIC_NODE_TYPES + live Method registry.
+#
+# After Phase-4 Adapter-Drift-Cleanup (Plan #4) this set is intentionally
+# empty: every Editor adapter must align cleanly with its Schicht-2 Action,
+# and the regression net below now uses `assert report.errors == []`.
+#
+# History of removed drifts:
+# wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md
+#
+# Rule: this set MUST stay empty. New drift => fix the adapter or the action,
+# not the snapshot.
+_KNOWN_ADAPTER_DRIFTS: frozenset[tuple[str, str]] = frozenset()
+
+
+def _extractDriftKey(errorMessage: str) -> tuple[str, str] | None:
+ """Parse a validator error message into a (nodeId, fieldName) drift key.
+
+ Recognises both rule-1 ("userParams.actionArg 'X' does not exist…") and
+ rule-2 ("required action arg 'X' is neither in userParams…") patterns.
+ """
+ import re
+ m = re.search(
+ r"adapter '([^']+)' bindsAction '[^']+': userParams\.actionArg '([^']+)'",
+ errorMessage,
+ )
+ if m:
+ return (m.group(1), m.group(2))
+ m = re.search(
+ r"adapter '([^']+)' bindsAction '[^']+': required action arg '([^']+)'",
+ errorMessage,
+ )
+ if m:
+ return (m.group(1), m.group(2))
+ return None
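+
+
+# Example round-trip (message wording mirrors the regexes above):
+#   "adapter 'trustee.processDocuments' bindsAction '...': userParams.actionArg
+#    'ghostArg' does not exist ..."  ->  ("trustee.processDocuments", "ghostArg")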
+
+
+def test_staticNodesHaveNoDriftAgainstLiveMethods():
+ """Strict regression: every Editor adapter in STATIC_NODE_TYPES must align
+ with its Schicht-2 Action signature.
+
+ Phase 3 shipped the validator with a tracked drift snapshot
+ (`_KNOWN_ADAPTER_DRIFTS`); Phase 4 cleaned the backlog so the snapshot is
+ empty and we now demand zero errors. Any new drift fails immediately —
+ fix the adapter or the action, never the assertion.
+
+ History: wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md
+ """
+ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+
+ instances = _instantiateLiveMethods()
+ if not instances:
+ pytest.skip("no methods could be instantiated in this test env")
+
+ registry = _buildActionsRegistryFromMethods(instances)
+ report = _validateAllAdapters(list(STATIC_NODE_TYPES), registry)
+
+ assert _KNOWN_ADAPTER_DRIFTS == frozenset(), (
+ "_KNOWN_ADAPTER_DRIFTS must stay empty after Phase-4 cleanup. "
+ "Do not add new entries — fix the drift instead."
+ )
+ assert report.errors == [], (
+ "Adapter↔Action drift detected:\n" + "\n".join(report.errors)
+ )
diff --git a/tests/unit/graphicalEditor/test_node_adapter.py b/tests/unit/graphicalEditor/test_node_adapter.py
new file mode 100644
index 00000000..7b24b01a
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_node_adapter.py
@@ -0,0 +1,170 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Tests for the Schicht-3 NodeAdapter projection (Phase 3).
+
+Covers the pure projection helpers in nodeAdapter.py:
+ - identifying method-bound vs framework-primitive nodes
+ - extracting bindsAction
+ - building UserParamMapping from legacy parameter dicts
+ - converting inputPorts dict-of-dicts into per-port accepts lists
+ - end-to-end legacy-node → NodeAdapter projection
+
+These tests do NOT touch live methods; they verify the projection logic
+in isolation so it is robust before the adapterValidator composes with it.
+"""
+from __future__ import annotations
+
+import pytest
+
+from modules.features.graphicalEditor.nodeAdapter import (
+ NodeAdapter,
+ UserParamMapping,
+ _adapterFromLegacyNode,
+ _bindsActionFromLegacy,
+ _extractVisibleWhen,
+ _isMethodBoundNode,
+ _projectAllAdapters,
+ _projectInputAccepts,
+ _userParamFromLegacyParam,
+)
+
+
+def _legacyMethodNode(**overrides):
+ base = {
+ "id": "trustee.processDocuments",
+ "category": "trustee",
+ "label": "Verarbeiten",
+ "description": "...",
+ "parameters": [
+ {"name": "documentList", "type": "DocumentList", "required": True,
+ "frontendType": "dataRef", "description": "Eingabe"},
+ {"name": "featureInstanceId", "type": "FeatureInstanceRef", "required": True,
+ "frontendType": "hidden", "description": "Trustee-Instanz"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "outputPorts": {0: {"schema": "TrusteeProcessResult"}},
+ "meta": {"icon": "mdi-x", "color": "#000", "usesAi": False},
+ "_method": "trustee",
+ "_action": "processDocuments",
+ }
+ base.update(overrides)
+ return base
+
+
+def _primitiveNode(**overrides):
+ base = {
+ "id": "flow.loop",
+ "category": "flow",
+ "label": "Schleife",
+ "parameters": [{"name": "items", "type": "string", "required": True}],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "executor": "flow",
+ }
+ base.update(overrides)
+ return base
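+
+
+# Orientation for the tests below (factory outputs, not production data):
+#   _adapterFromLegacyNode(_legacyMethodNode()) -> NodeAdapter bound to
+#       "trustee.processDocuments", inputAccepts [["DocumentList", "Transit"]]
+#   _adapterFromLegacyNode(_primitiveNode())    -> None (framework primitive)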
+
+
+class TestIsMethodBound:
+ def test_methodBoundIsTrue(self):
+ assert _isMethodBoundNode(_legacyMethodNode()) is True
+
+ def test_primitiveIsFalse(self):
+ assert _isMethodBoundNode(_primitiveNode()) is False
+
+ @pytest.mark.parametrize("partial", [
+ {"_method": "trustee"}, # missing _action
+ {"_action": "processDocuments"}, # missing _method
+ {},
+ ])
+ def test_partialBindingIsFalse(self, partial):
+ node = _primitiveNode(**partial)
+ assert _isMethodBoundNode(node) is False
+
+
+class TestBindsActionFromLegacy:
+ def test_returnsCanonicalFqn(self):
+ assert _bindsActionFromLegacy(_legacyMethodNode()) == "trustee.processDocuments"
+
+ def test_returnsNoneForPrimitive(self):
+ assert _bindsActionFromLegacy(_primitiveNode()) is None
+
+
+class TestUserParamFromLegacy:
+ def test_carriesEditorOverridesOnly(self):
+ legacy = {"name": "documentList", "type": "DocumentList", "required": True,
+ "frontendType": "dataRef", "description": "Eingabe", "default": []}
+ mapping = _userParamFromLegacyParam(legacy)
+ assert isinstance(mapping, UserParamMapping)
+ assert mapping.actionArg == "documentList"
+ assert mapping.uiHint == "dataRef"
+ assert mapping.description == "Eingabe"
+ assert mapping.defaultValue == []
+ assert mapping.frontendOptions is None
+
+ def test_extractsConditionalVisibility(self):
+ legacy = {
+ "name": "filterJson",
+ "type": "string",
+ "frontendType": "textarea",
+ "frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]},
+ }
+ mapping = _userParamFromLegacyParam(legacy)
+ assert mapping.visibleWhen == {"actionArg": "mode", "in": ["raw", "aggregate"]}
+
+
+class TestExtractVisibleWhen:
+ def test_returnsNoneForMissingHint(self):
+ assert _extractVisibleWhen(None) is None
+ assert _extractVisibleWhen({}) is None
+ assert _extractVisibleWhen({"dependsOn": "x"}) is None
+
+ def test_normalizesScalarShowWhen(self):
+ out = _extractVisibleWhen({"dependsOn": "entity", "showWhen": "tenant"})
+ assert out == {"actionArg": "entity", "in": ["tenant"]}
+
+
+class TestProjectInputAccepts:
+ def test_perPortAcceptsList(self):
+ node = _legacyMethodNode()
+ assert _projectInputAccepts(node) == [["DocumentList", "Transit"]]
+
+ def test_emptyForZeroInputs(self):
+ node = _legacyMethodNode(inputs=0, inputPorts={})
+ assert _projectInputAccepts(node) == []
+
+ def test_handlesStringKeys(self):
+ node = _legacyMethodNode(inputPorts={"0": {"accepts": ["Transit"]}})
+ assert _projectInputAccepts(node) == [["Transit"]]
+
+ def test_missingPortReturnsEmptyList(self):
+ node = _legacyMethodNode(inputs=2, inputPorts={0: {"accepts": ["Transit"]}})
+ assert _projectInputAccepts(node) == [["Transit"], []]
+
+
+class TestAdapterFromLegacyNode:
+ def test_buildsAdapter(self):
+ adapter = _adapterFromLegacyNode(_legacyMethodNode())
+ assert isinstance(adapter, NodeAdapter)
+ assert adapter.nodeId == "trustee.processDocuments"
+ assert adapter.bindsAction == "trustee.processDocuments"
+ assert adapter.category == "trustee"
+ assert len(adapter.userParams) == 2
+ assert adapter.userParams[0].actionArg == "documentList"
+ assert adapter.inputAccepts == [["DocumentList", "Transit"]]
+ assert adapter.contextParams == {}
+ assert adapter.meta.get("icon") == "mdi-x"
+
+ def test_returnsNoneForPrimitive(self):
+ assert _adapterFromLegacyNode(_primitiveNode()) is None
+
+
+class TestProjectAllAdapters:
+ def test_skipsPrimitives(self):
+ nodes = [_legacyMethodNode(), _primitiveNode()]
+ out = _projectAllAdapters(nodes)
+ assert list(out.keys()) == ["trustee.processDocuments"]
diff --git a/tests/unit/graphicalEditor/test_portTypes_catalog.py b/tests/unit/graphicalEditor/test_portTypes_catalog.py
new file mode 100644
index 00000000..11967376
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_portTypes_catalog.py
@@ -0,0 +1,257 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Catalog integrity + new Phase-1 schemas
+(see wiki/c-work/1-plan/2026-04-typed-action-architecture.md).
+"""
+
+import pytest
+
+from modules.features.graphicalEditor.portTypes import (
+ PORT_TYPE_CATALOG,
+ PRIMITIVE_TYPES,
+ PortField,
+ PortSchema,
+ _stripContainer,
+ _validateCatalog,
+)
+
+
+# ---------------------------------------------------------------------------
+# Validator behaviour
+# ---------------------------------------------------------------------------
+
+def test_catalogIsHealthy():
+ """The shipped catalog must validate without errors."""
+ errors = _validateCatalog()
+ assert errors == [], "Catalog has integrity errors:\n - " + "\n - ".join(errors)
+
+
+def test_validatorDetectsUnknownType(monkeypatch):
+ """Inject a bad schema and ensure it is reported."""
+ bad = PortSchema(name="_BadTest", fields=[
+ PortField(name="x", type="DoesNotExist"),
+ ])
+ monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadTest", bad)
+ errors = _validateCatalog()
+ assert any("DoesNotExist" in e for e in errors)
+
+
+def test_validatorDetectsBadDiscriminatorType(monkeypatch):
+ bad = PortSchema(name="_BadDisc", fields=[
+ PortField(name="x", type="int", discriminator=True),
+ ])
+ monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadDisc", bad)
+ errors = _validateCatalog()
+ assert any("discriminator must be 'str'" in e for e in errors)
+
+
+def test_validatorDetectsMultipleDiscriminators(monkeypatch):
+ bad = PortSchema(name="_DoubleDisc", fields=[
+ PortField(name="a", type="str", discriminator=True),
+ PortField(name="b", type="str", discriminator=True),
+ ])
+ monkeypatch.setitem(PORT_TYPE_CATALOG, "_DoubleDisc", bad)
+ errors = _validateCatalog()
+ assert any("max 1 allowed" in e for e in errors)
+
+
+def test_validatorDetectsKeyNameMismatch(monkeypatch):
+ bad = PortSchema(name="DifferentName", fields=[
+ PortField(name="x", type="str"),
+ ])
+ monkeypatch.setitem(PORT_TYPE_CATALOG, "_KeyMismatch", bad)
+ errors = _validateCatalog()
+ assert any("does not match schema.name" in e for e in errors)
+
+
+# ---------------------------------------------------------------------------
+# _stripContainer helper
+# ---------------------------------------------------------------------------
+
+@pytest.mark.parametrize("typeStr,expected", [
+ ("str", ["str"]),
+ ("int", ["int"]),
+ ("ConnectionRef", ["ConnectionRef"]),
+ ("List[Document]", ["Document"]),
+ ("List[ProcessError]", ["ProcessError"]),
+ ("Dict[str,Any]", ["str", "Any"]),
+ ("Dict[str,int]", ["str", "int"]),
+ ("", []),
+])
+def test_stripContainer(typeStr, expected):
+ assert _stripContainer(typeStr) == expected
+
+
+# ---------------------------------------------------------------------------
+# Phase-1 new Refs
+# ---------------------------------------------------------------------------
+
+def test_featureInstanceRefHasDiscriminator():
+ s = PORT_TYPE_CATALOG["FeatureInstanceRef"]
+ names = {f.name for f in s.fields}
+ assert names == {"id", "featureCode", "label", "mandateId"}
+ discriminators = [f for f in s.fields if f.discriminator]
+ assert len(discriminators) == 1
+ assert discriminators[0].name == "featureCode"
+ assert discriminators[0].type == "str"
+
+
+def test_connectionRefAuthorityIsDiscriminator():
+ s = PORT_TYPE_CATALOG["ConnectionRef"]
+ discriminators = [f for f in s.fields if f.discriminator]
+ assert len(discriminators) == 1
+ assert discriminators[0].name == "authority"
+
+
+def test_clickUpListRefExists():
+ s = PORT_TYPE_CATALOG["ClickUpListRef"]
+ names = {f.name for f in s.fields}
+ assert "listId" in names
+ assert "connection" in names
+
+
+def test_promptTemplateRefExists():
+ s = PORT_TYPE_CATALOG["PromptTemplateRef"]
+ names = {f.name for f in s.fields}
+ assert "id" in names
+
+
+# ---------------------------------------------------------------------------
+# Phase-1 Trustee Result Schemas
+# ---------------------------------------------------------------------------
+
+def test_trusteeRefreshResultStructure():
+ s = PORT_TYPE_CATALOG["TrusteeRefreshResult"]
+ names = {f.name for f in s.fields}
+ assert {"syncCounts", "oldestBookingDate", "newestBookingDate",
+ "featureInstance", "errors"}.issubset(names)
+
+
+def test_trusteeProcessResultExposesDocuments():
+ s = PORT_TYPE_CATALOG["TrusteeProcessResult"]
+ docField = next((f for f in s.fields if f.name == "documents"), None)
+ assert docField is not None
+ assert docField.type == "List[Document]"
+ assert docField.required is True
+
+
+def test_trusteeSyncResultHasJournalLines():
+ s = PORT_TYPE_CATALOG["TrusteeSyncResult"]
+ names = {f.name for f in s.fields}
+ assert "syncedCount" in names
+ assert "journalLines" in names
+
+
+def test_journalLineHasAccountingFields():
+ s = PORT_TYPE_CATALOG["JournalLine"]
+ names = {f.name for f in s.fields}
+ for required in ("bookingDate", "account", "amount"):
+ assert required in names
+
+
+def test_processErrorHasStageAndMessage():
+ s = PORT_TYPE_CATALOG["ProcessError"]
+ names = {f.name for f in s.fields}
+ assert {"stage", "message"}.issubset(names)
+
+
+# ---------------------------------------------------------------------------
+# Phase-1 Redmine Result Schemas
+# ---------------------------------------------------------------------------
+
+def test_redmineTicketHasCoreFields():
+ s = PORT_TYPE_CATALOG["RedmineTicket"]
+ names = {f.name for f in s.fields}
+ for required in ("id", "subject", "status"):
+ assert required in names
+
+
+def test_redmineTicketListReferencesTicket():
+ s = PORT_TYPE_CATALOG["RedmineTicketList"]
+ ticketsField = next((f for f in s.fields if f.name == "tickets"), None)
+ assert ticketsField is not None
+ assert ticketsField.type == "List[RedmineTicket]"
+
+
+def test_redmineStatsExists():
+ s = PORT_TYPE_CATALOG["RedmineStats"]
+ names = {f.name for f in s.fields}
+ assert "kpis" in names
+
+
+# ---------------------------------------------------------------------------
+# Phase-1 Expressions / Misc
+# ---------------------------------------------------------------------------
+
+def test_cronExpressionExists():
+ s = PORT_TYPE_CATALOG["CronExpression"]
+ names = {f.name for f in s.fields}
+ assert "expression" in names
+ assert "timezone" in names
+
+
+def test_conditionExpressionHasSyntaxEnum():
+ s = PORT_TYPE_CATALOG["ConditionExpression"]
+ syntaxField = next((f for f in s.fields if f.name == "syntax"), None)
+ assert syntaxField is not None
+ assert syntaxField.enumValues
+ assert "jmespath" in syntaxField.enumValues
+
+
+def test_attachmentSpecHasSourceEnum():
+ s = PORT_TYPE_CATALOG["AttachmentSpec"]
+ sourceField = next((f for f in s.fields if f.name == "source"), None)
+ assert sourceField is not None
+ assert set(sourceField.enumValues or []) == {"path", "document", "url"}
+
+
+def test_taskAttachmentRefExists():
+ s = PORT_TYPE_CATALOG["TaskAttachmentRef"]
+ names = {f.name for f in s.fields}
+ assert "taskId" in names
+
+
+def test_dateTimeAndUrlSemanticPrimitivesExist():
+ assert "DateTime" in PORT_TYPE_CATALOG
+ assert "Url" in PORT_TYPE_CATALOG
+
+
+# ---------------------------------------------------------------------------
+# Cross-cutting: every Trustee/Redmine result references FeatureInstanceRef
+# ---------------------------------------------------------------------------
+
+@pytest.mark.parametrize("schemaName", [
+ "TrusteeRefreshResult",
+ "TrusteeProcessResult",
+ "TrusteeSyncResult",
+ "RedmineTicket",
+ "RedmineTicketList",
+ "RedmineStats",
+])
+def test_resultSchemasReferenceFeatureInstance(schemaName):
+ s = PORT_TYPE_CATALOG[schemaName]
+ fiField = next((f for f in s.fields if f.name == "featureInstance"), None)
+ assert fiField is not None, f"{schemaName} should expose featureInstance for traceability"
+ assert fiField.type == "FeatureInstanceRef"
+
+
+# ---------------------------------------------------------------------------
+# Serialization stays compatible (frontend reads model_dump output)
+# ---------------------------------------------------------------------------
+
+def test_portFieldDumpsDiscriminatorFlag():
+ f = PortField(name="x", type="str", discriminator=True)
+ dumped = f.model_dump()
+ assert dumped["discriminator"] is True
+
+
+def test_defaultDiscriminatorIsFalse():
+ f = PortField(name="x", type="str")
+ dumped = f.model_dump()
+ assert dumped["discriminator"] is False
+
+
+def test_primitiveTypesFrozenSet():
+ assert "str" in PRIMITIVE_TYPES
+ assert "Any" in PRIMITIVE_TYPES
+ assert "DoesNotExist" not in PRIMITIVE_TYPES
diff --git a/tests/unit/graphicalEditor/test_port_schema_recursive.py b/tests/unit/graphicalEditor/test_port_schema_recursive.py
new file mode 100644
index 00000000..b3ae22c6
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_port_schema_recursive.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2025 Patrick Motsch
+"""Port type catalog: nested provenance schemas (Typed Generic Handover)."""
+
+from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, _defaultForType
+
+
+def test_connection_ref_in_catalog():
+ s = PORT_TYPE_CATALOG["ConnectionRef"]
+ names = {f.name for f in s.fields}
+ assert names == {"id", "authority", "label"}
+
+
+def test_document_list_has_provenance_fields():
+ s = PORT_TYPE_CATALOG["DocumentList"]
+ names = {f.name for f in s.fields}
+ assert "documents" in names
+ assert "connection" in names
+ assert "source" in names
+ assert "count" in names
+
+
+def test_default_for_nested_schema_type():
+ assert _defaultForType("ConnectionRef") == {}
+ assert _defaultForType("List[Document]") == []
diff --git a/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py b/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py
new file mode 100644
index 00000000..16aec90d
--- /dev/null
+++ b/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2025 Patrick Motsch
+from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
+from modules.workflows.automation2.graphUtils import parse_graph_defined_schema, validateGraph
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+
+
+def test_compute_upstream_paths_includes_form_dynamic_fields():
+ graph = {
+ "nodes": [
+ {
+ "id": "form1",
+ "type": "input.form",
+ "parameters": {
+ "fields": [{"name": "custName", "type": "str", "label": "Name", "required": True}],
+ },
+ },
+ {"id": "ai1", "type": "ai.prompt", "parameters": {"aiPrompt": "hi"}},
+ ],
+ "connections": [
+ {"source": "form1", "target": "ai1", "sourceOutput": 0, "targetInput": 0},
+ ],
+ }
+ paths = compute_upstream_paths(graph, "ai1")
+ labels = [p["label"] for p in paths if p.get("producerNodeId") == "form1"]
+ assert any("custName" in lbl for lbl in labels), labels
+
+
+def test_parse_graph_defined_schema_fields():
+ node = {"parameters": {"fields": [{"name": "a", "type": "str", "label": "A", "required": False}]}}
+ sch = parse_graph_defined_schema(node, "fields")
+ assert sch and sch["name"] == "FormPayload_dynamic"
+ assert sch["fields"][0]["name"] == "a"
+
+
+def test_parse_graph_defined_schema_nested_group():
+ node = {
+ "parameters": {
+ "fields": [
+ {
+ "name": "addr",
+ "type": "group",
+ "label": "Addr",
+ "fields": [{"name": "zip", "type": "str", "label": "ZIP"}],
+ },
+ ],
+ },
+ }
+ sch = parse_graph_defined_schema(node, "fields")
+ names = [f["name"] for f in sch["fields"]]
+ assert "addr.zip" in names
+
+
+def test_validate_graph_port_mismatch_errors():
+ node_type_ids = {n["id"] for n in STATIC_NODE_TYPES}
+ graph = {
+ "nodes": [
+ {"id": "t1", "type": "trigger.manual", "parameters": {}},
+ {"id": "e1", "type": "email.checkEmail", "parameters": {"connectionReference": "x"}},
+ {"id": "a1", "type": "ai.prompt", "parameters": {"aiPrompt": "summarize"}},
+ ],
+ "connections": [
+ {"source": "t1", "target": "e1", "sourceOutput": 0, "targetInput": 0},
+ {"source": "e1", "target": "a1", "sourceOutput": 0, "targetInput": 0},
+ ],
+ }
+ errors = validateGraph(graph, node_type_ids)
+ assert any("Port mismatch" in e for e in errors), errors
diff --git a/tests/unit/methods/__init__.py b/tests/unit/methods/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/methods/test_action_signature_validator.py b/tests/unit/methods/test_action_signature_validator.py
new file mode 100644
index 00000000..8e54fdcf
--- /dev/null
+++ b/tests/unit/methods/test_action_signature_validator.py
@@ -0,0 +1,289 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Tests for the action-signature validator (Phase 2 of the Typed Action
+Architecture, see wiki/c-work/1-plan/2026-04-typed-action-architecture.md).
+
+Two parts:
+ A) Unit tests for the validator itself (positive + negative cases)
+ B) Healthy-state test: every Method discovered by methodDiscovery passes
+ validation. This is the regression net that catches drift between an
+ action's declared types and the type catalog.
+"""
+from __future__ import annotations
+
+import pytest
+
+from modules.datamodels.datamodelWorkflowActions import (
+ WorkflowActionDefinition,
+ WorkflowActionParameter,
+)
+from modules.shared.frontendTypes import FrontendType
+from modules.workflows.methods._actionSignatureValidator import (
+ _formatValidationReport,
+ _validateActionDefinition,
+ _validateActionParameter,
+ _validateActionsDict,
+ _validateMethods,
+ _validateTypeRef,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+def _makeParam(typeStr: str, **kwargs) -> WorkflowActionParameter:
+ defaults = {
+ "name": "p",
+ "type": typeStr,
+ "frontendType": FrontendType.TEXT,
+ "required": False,
+ "description": "",
+ }
+ defaults.update(kwargs)
+ return WorkflowActionParameter(**defaults)
+
+
+def _makeAction(
+ actionId: str = "test.x",
+ parameters: dict | None = None,
+ outputType: str = "ActionResult",
+) -> WorkflowActionDefinition:
+ return WorkflowActionDefinition(
+ actionId=actionId,
+ description="t",
+ parameters=parameters or {},
+ outputType=outputType,
+ execute=lambda *a, **k: None,
+ )
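+
+
+# Orientation (exact error wording is an implementation detail; the empty
+# list on success is the contract the tests below rely on):
+#   _validateTypeRef("List[Document]") -> []
+#   _validateTypeRef("List[Foo]")      -> non-empty list naming 'Foo'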
+
+
+# ---------------------------------------------------------------------------
+# A) Unit tests
+# ---------------------------------------------------------------------------
+
+class TestValidateTypeRef:
+ """Single-type validation."""
+
+ @pytest.mark.parametrize("t", [
+ "str", "int", "bool", "float", "Any",
+ "ConnectionRef", "FeatureInstanceRef", "DocumentList",
+ "TrusteeProcessResult", "RedmineTicket",
+ "List[str]", "List[int]", "List[Any]",
+ "Dict[str,Any]", "Dict[str,Document]",
+ "List[FeatureInstanceRef]",
+ ])
+ def test_validTypes(self, t):
+ assert _validateTypeRef(t) == []
+
+ @pytest.mark.parametrize("t", [
+ "list", # too generic
+ "dict", # too generic
+ "Foobar", # unknown schema
+ "List[Foo]", # unknown inner
+ "Dict[str,Foo]", # unknown inner value
+ "", # empty
+ ])
+ def test_invalidTypes(self, t):
+ errors = _validateTypeRef(t)
+ assert errors, f"expected validation errors for {t!r}"
+
+
+class TestValidateActionParameter:
+ def test_validParam(self):
+ p = _makeParam("ConnectionRef")
+ assert _validateActionParameter("ai.x", "p", p) == []
+
+ def test_invalidParam(self):
+ p = _makeParam("Foobar")
+ errors = _validateActionParameter("ai.x", "myParam", p)
+ assert errors and errors[0].startswith("ai.x.myParam:")
+
+
+class TestValidateActionDefinition:
+ def test_valid(self):
+ action = _makeAction(
+ parameters={"a": _makeParam("ConnectionRef", name="a")},
+ outputType="DocumentList",
+ )
+ assert _validateActionDefinition(action) == []
+
+ def test_invalidOutputType(self):
+ action = _makeAction(outputType="DoesNotExist")
+ errors = _validateActionDefinition(action)
+ assert any("" in e for e in errors)
+
+ def test_genericOutputAllowed(self):
+ # ActionResult and Transit are allowed as fire-and-forget outputs
+ for t in ("ActionResult", "Transit"):
+ assert _validateActionDefinition(_makeAction(outputType=t)) == []
+
+
+class TestValidateActionsDict:
+ def test_emptyDictOk(self):
+ assert _validateActionsDict("m", {}) == []
+
+ def test_nonActionDefinitionRejected(self):
+ errors = _validateActionsDict("m", {"x": "not an action"})
+ assert any("not a WorkflowActionDefinition" in e for e in errors)
+
+ def test_collectsErrorsAcrossActions(self):
+ actions = {
+ "good": _makeAction(
+ parameters={"a": _makeParam("str", name="a")},
+ outputType="DocumentList",
+ ),
+ "bad": _makeAction(
+ actionId="m.bad",
+ parameters={"x": _makeParam("Foobar", name="x")},
+ outputType="AlsoUnknown",
+ ),
+ }
+ errors = _validateActionsDict("m", actions)
+ # bad action contributes 2 errors, good contributes 0
+ assert len(errors) == 2
+
+
+class TestValidateMethods:
+ def test_emptyOk(self):
+ assert _validateMethods([]) == []
+
+ def test_methodLikeObject(self):
+ class FakeMethod:
+ name = "fake"
+
+ def __init__(self):
+ self._actions = {
+ "a": _makeAction(
+ parameters={"p": _makeParam("ConnectionRef", name="p")},
+ outputType="DocumentList",
+ ),
+ }
+
+ assert _validateMethods([FakeMethod()]) == []
+
+ def test_methodWithDrift(self):
+ class FakeMethod:
+ name = "fake"
+
+ def __init__(self):
+ self._actions = {
+ "broken": _makeAction(
+ actionId="fake.broken",
+ parameters={"p": _makeParam("Unknown", name="p")},
+ outputType="ActionResult",
+ ),
+ }
+
+ errors = _validateMethods([FakeMethod()])
+ assert errors and "fake.broken.p" in errors[0]
+
+
+class TestFormatValidationReport:
+ def test_healthyMessage(self):
+ assert "healthy" in _formatValidationReport([]).lower()
+
+ def test_errorReport(self):
+ msg = _formatValidationReport(["a.x: bad", "b.y: also bad"])
+ assert "Found 2 action-signature drift" in msg
+ assert "a.x: bad" in msg
+ assert "b.y: also bad" in msg
+
+
+# ---------------------------------------------------------------------------
+# B) Healthy-state test for the real Method registry
+# ---------------------------------------------------------------------------
+
+class _NullRbac:
+ """Minimal RBAC stub so MethodBase.__init__ does not crash."""
+
+ def getUserPermissions(self, **kwargs): # noqa: D401
+ class _P:
+ view = True
+ read = True
+ create = True
+ update = True
+ delete = True
+ return _P()
+
+
+class _StubServices:
+ """Minimal services container required by MethodBase.__init__."""
+
+ def __init__(self):
+ self.rbac = _NullRbac()
+ self.user = type("U", (), {"id": "test-user", "roleLabels": []})()
+ self.mandateId = None
+ self.featureInstanceId = None
+
+
+def _ensureOptionalDeps():
+ """Patch sys.modules with stubs for optional deps that some Methods
+ import at module-load time but that the test env might not have.
+
+ This is purely so the validator can inspect the action signatures —
+ no real network calls happen in these tests.
+ """
+ import sys
+ import types
+
+ class _AnyAttrModule(types.ModuleType):
+ """Module stub that lazily creates dummy classes for any attribute,
+ so type annotations like `aiohttp.ClientSession` resolve."""
+
+ def __getattr__(self, name): # noqa: D401
+ return type(name, (), {})
+
+ for name in ("aiohttp",):
+ if name not in sys.modules:
+ sys.modules[name] = _AnyAttrModule(name)
+
+
+def _instantiateMethod(methodCls):
+ """Try to instantiate a Method with a stub services object.
+
+    Some Methods do extra work in __init__ (e.g. helper imports). If
+    instantiation fails in this environment, the test is skipped via
+    pytest.skip rather than failed.
+ """
+ _ensureOptionalDeps()
+ try:
+ return methodCls(_StubServices())
+ except Exception as exc: # pragma: no cover - environment dependent
+ pytest.skip(f"could not instantiate {methodCls.__name__}: {exc}")
+    return None  # unreachable: pytest.skip raises Skipped; kept as a defensive fallback
+
+
+@pytest.mark.parametrize("modulePath,className", [
+ ("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee"),
+ ("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine"),
+ ("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint"),
+ ("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook"),
+ ("modules.workflows.methods.methodAi.methodAi", "MethodAi"),
+ ("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup"),
+ ("modules.workflows.methods.methodFile.methodFile", "MethodFile"),
+ ("modules.workflows.methods.methodContext.methodContext", "MethodContext"),
+ ("modules.workflows.methods.methodJira.methodJira", "MethodJira"),
+ ("modules.workflows.methods.methodChatbot.methodChatbot", "MethodChatbot"),
+])
+def test_methodSignaturesAreHealthy(modulePath, className):
+ """Each shipping Method's _actions must validate against the catalog."""
+ import importlib
+
+ try:
+ module = importlib.import_module(modulePath)
+ except ImportError as exc:
+ pytest.skip(f"module not importable: {exc}")
+ return
+
+ cls = getattr(module, className, None)
+ if cls is None:
+ pytest.skip(f"{className} not found in {modulePath}")
+ return
+
+ instance = _instantiateMethod(cls)
+ if instance is None:
+ return
+
+ errors = _validateMethods([instance])
+ assert errors == [], _formatValidationReport(errors)
diff --git a/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py b/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py
new file mode 100644
index 00000000..d1b6397c
--- /dev/null
+++ b/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2025 Patrick Motsch
+"""Trustee node schema-compliance under the Pick-not-Push typed port system.
+
+Verifies that:
+ - All three trustee actions (extractFromFiles, processDocuments,
+ syncToAccounting) declare ``ActionResult`` as output, matching what the
+ Python implementations actually return at runtime
+ (``ActionResult.isSuccess(documents=[...])``).
+ - processDocuments / syncToAccounting accept ``ActionResult`` (the producer
+ schema) plus ``DocumentList`` and ``Transit`` for back-compat.
+ - The ``documentList`` parameter is required, typed ``List[ActionDocument]``
+ (the concrete shape consumed by ``_resolveDocumentList``) and rendered via
+ the dataRef picker so the user can bind it to ``upstream → documents``.
+ - The end-to-end Trustee pipeline graph (extract -> process -> sync) passes
+ hard port-compat validation (validateGraph).
+ - actionNodeExecutor produces canonical ``documents`` field — no legacy
+ ``documentList`` alias — so that DataRef path=['documents'] is the single
+ source of truth.
+"""
+
+import inspect
+
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
+from modules.workflows.automation2.executors import actionNodeExecutor as _actionExec
+from modules.workflows.automation2.graphUtils import validateGraph
+
+
+def _node(nodeId: str) -> dict:
+ return next(n for n in STATIC_NODE_TYPES if n["id"] == nodeId)
+
+
+def test_extractFromFiles_outputs_ActionResult():
+ """Runtime returns ActionResult.isSuccess(documents=[...]) — see
+ actions/extractFromFiles.py. The adapter must declare the same."""
+ n = _node("trustee.extractFromFiles")
+ assert n["outputPorts"][0]["schema"] == "ActionResult"
+
+
+def test_processDocuments_outputs_ActionResult():
+ n = _node("trustee.processDocuments")
+ assert n["outputPorts"][0]["schema"] == "ActionResult"
+
+
+def test_syncToAccounting_outputs_ActionResult():
+ n = _node("trustee.syncToAccounting")
+ assert n["outputPorts"][0]["schema"] == "ActionResult"
+
+
+def test_processDocuments_accepts_ActionResult_and_legacy():
+ """processDocuments must accept ActionResult (the new producer schema for
+ extractFromFiles) plus DocumentList / Transit for back-compat."""
+ n = _node("trustee.processDocuments")
+ accepts = n["inputPorts"][0]["accepts"]
+ assert "ActionResult" in accepts
+ assert "DocumentList" in accepts
+ assert "Transit" in accepts
+ assert "UdmDocument" not in accepts, (
+ "UdmDocument was dropped from accepts during the Pick-not-Push schema cleanup."
+ )
+
+
+def test_syncToAccounting_accepts_ActionResult_and_legacy():
+ n = _node("trustee.syncToAccounting")
+ accepts = n["inputPorts"][0]["accepts"]
+ assert "ActionResult" in accepts
+ assert "DocumentList" in accepts
+ assert "Transit" in accepts
+
+
+def test_processDocuments_documentList_param_typed_required_dataRef():
+ """documentList is a Pick-not-Push DataRef parameter — must be visible
+ and typed exactly like the producer field (``ActionResult.documents`` is
+ ``List[ActionDocument]``) so DataPicker's strict-filter accepts it.
+ """
+ params = {p["name"]: p for p in _node("trustee.processDocuments")["parameters"]}
+ p = params["documentList"]
+ assert p["type"] == "List[ActionDocument]", (
+ "documentList must declare the concrete producer type so the DataPicker "
+ "strict-filter resolves upstream ActionResult.documents as compatible."
+ )
+ assert p["required"] is True
+ assert p["frontendType"] == "dataRef", (
+ "documentList must use the dataRef renderer so the binding is visible"
+ )
+
+
+def test_syncToAccounting_documentList_param_typed_required_dataRef():
+ params = {p["name"]: p for p in _node("trustee.syncToAccounting")["parameters"]}
+ p = params["documentList"]
+ assert p["type"] == "List[ActionDocument]", (
+ "documentList must declare the concrete producer type so the DataPicker "
+ "strict-filter resolves upstream ActionResult.documents as compatible."
+ )
+ assert p["required"] is True
+ assert p["frontendType"] == "dataRef", (
+ "documentList must use the dataRef renderer so the binding is visible"
+ )
+
+
+def test_trustee_pipeline_graph_passes_hard_port_validation():
+ """End-to-end pipeline: trigger.manual -> extract -> process -> sync.
+
+ Mirrors what frontend_nyla/.../trusteePipelineGraph.ts builds for
+ _buildScanUploadGraph. Port-compat must hold without warnings.
+ """
+ graph = {
+ "nodes": [
+ {"id": "trigger-manual", "type": "trigger.manual", "parameters": {}},
+ {
+ "id": "extract",
+ "type": "trustee.extractFromFiles",
+ "parameters": {
+ "fileIds": ["f1"],
+ "featureInstanceId": "inst-1",
+ "prompt": "",
+ },
+ },
+ {
+ "id": "process",
+ "type": "trustee.processDocuments",
+ "parameters": {
+ "documentList": {"type": "ref", "nodeId": "extract", "path": ["documents"]},
+ "featureInstanceId": "inst-1",
+ },
+ },
+ {
+ "id": "sync",
+ "type": "trustee.syncToAccounting",
+ "parameters": {
+ "documentList": {"type": "ref", "nodeId": "process", "path": ["documents"]},
+ "featureInstanceId": "inst-1",
+ },
+ },
+ ],
+ "connections": [
+ {"source": "trigger-manual", "sourceOutput": 0, "target": "extract", "targetInput": 0},
+ {"source": "extract", "sourceOutput": 0, "target": "process", "targetInput": 0},
+ {"source": "process", "sourceOutput": 0, "target": "sync", "targetInput": 0},
+ ],
+ }
+ nodeTypeIds = {n["id"] for n in STATIC_NODE_TYPES}
+ errors = validateGraph(graph, nodeTypeIds)
+ portMismatches = [e for e in errors if "Port mismatch" in e]
+ assert not portMismatches, f"Trustee pipeline must be port-compatible: {portMismatches}"
+
+
+def test_catalog_ActionResult_exposes_documents_field():
+ """Without ``documents`` on the ActionResult schema the DataPicker cannot
+ surface the canonical list-of-documents path that every downstream node
+ (processDocuments, syncToAccounting, AI consumers, ...) needs to bind to.
+ """
+ schema = PORT_TYPE_CATALOG.get("ActionResult")
+ assert schema is not None
+ fieldNames = {f.name for f in schema.fields}
+ assert "documents" in fieldNames, (
+ "ActionResult.documents must be in PORT_TYPE_CATALOG so the frontend "
+ "DataPicker can offer it as a bindable path."
+ )
+
+
+def test_catalog_ActionDocument_is_registered():
+ """ActionResult.documents is List[ActionDocument]; the inner schema must
+ be registered so the picker can drill down to ``documents → * → documentName``.
+ """
+ schema = PORT_TYPE_CATALOG.get("ActionDocument")
+ assert schema is not None
+ fieldNames = {f.name for f in schema.fields}
+ assert {"documentName", "documentData", "mimeType"}.issubset(fieldNames), (
+ "ActionDocument schema must mirror datamodelChat.ActionDocument."
+ )
+
+
+def test_actionNodeExecutor_does_not_emit_legacy_documentList_alias():
+ """Source-code assertion: out dict in execute() must not write documentList alias.
+
+ Pick-not-Push canonicalises on ``documents``. Removing the alias prevents
+ DataRefs from drifting back to the legacy field name.
+ """
+ src = inspect.getsource(_actionExec)
+ assert '"documentList": docsList' not in src, (
+ "Legacy alias ``documentList`` must be removed from actionNodeExecutor "
+ "out-dict (use canonical ``documents`` only — see issues.md "
+ "'Trustee Schema-Compliance')."
+ )
+ assert '"documents": docsList' in src, (
+ "Canonical ``documents`` field missing from actionNodeExecutor out-dict."
+ )
diff --git a/tests/unit/scripts/__init__.py b/tests/unit/scripts/__init__.py
new file mode 100644
index 00000000..fdcc4f0e
--- /dev/null
+++ b/tests/unit/scripts/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
diff --git a/tests/unit/scripts/test_migrate_feature_instance_refs.py b/tests/unit/scripts/test_migrate_feature_instance_refs.py
new file mode 100644
index 00000000..80367b4e
--- /dev/null
+++ b/tests/unit/scripts/test_migrate_feature_instance_refs.py
@@ -0,0 +1,289 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Tests for ``scripts/script_migrate_feature_instance_refs.py``.
+
+The script touches the live ``poweron_graphicaleditor`` DB. Tests run against
+an in-memory fake psycopg2 connection so we exercise the full code path
+(SELECT -> migrate -> UPDATE) without requiring a real Postgres server.
+"""
+from __future__ import annotations
+
+import importlib
+import json
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
+
+import pytest
+
+_gatewayPath = Path(__file__).resolve().parents[3]
+_scriptsPath = _gatewayPath / "scripts"
+if str(_scriptsPath) not in sys.path:
+ sys.path.insert(0, str(_scriptsPath))
+
+migrationModule = importlib.import_module("script_migrate_feature_instance_refs")
+
+
+# ---------------------------------------------------------------------------
+# Fake psycopg2 connection / cursor
+# ---------------------------------------------------------------------------
+
+class _FakeCursor:
+ """Mimics enough of psycopg2's RealDictCursor + plain cursor for the script."""
+
+ def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]], updates: List[Tuple[str, str, Any]]):
+ self._rowsByTable = rowsByTable
+ self._updates = updates
+ self._lastFetch: List[Dict[str, Any]] = []
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc, tb):
+ return False
+
+ def execute(self, query: str, params: Any = None):
+ if query.strip().upper().startswith("SELECT"):
+ for table, rows in self._rowsByTable.items():
+ if table in query:
+ self._lastFetch = list(rows)
+ return
+ self._lastFetch = []
+ return
+ if query.strip().upper().startswith("UPDATE"):
+ for table in self._rowsByTable:
+ if table in query:
+ graphValue, pk = params
+ if hasattr(graphValue, "adapted"):
+ graphValue = graphValue.adapted
+ self._updates.append((table, pk, graphValue))
+ return
+ return
+
+ def fetchall(self):
+ return self._lastFetch
+
+
+class _FakeConn:
+ def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]]):
+ self._rowsByTable = rowsByTable
+ self.updates: List[Tuple[str, str, Any]] = []
+ self.committed = False
+ self.closed = False
+
+ def cursor(self, cursor_factory: Any = None):
+ return _FakeCursor(self._rowsByTable, self.updates)
+
+ def commit(self):
+ self.committed = True
+
+ def close(self):
+ self.closed = True
+
+
+@pytest.fixture
+def graphsByTable() -> Dict[str, List[Dict[str, Any]]]:
+ return {
+ '"Automation2Workflow"': [
+ {
+ "pk": "wf-legacy",
+ "graph": {
+ "nodes": [
+ {
+ "id": "n1",
+ "type": "trustee.processDocuments",
+ "parameters": {"featureInstanceId": "11111111-1111-1111-1111-111111111111"},
+ },
+ {
+ "id": "n2",
+ "type": "redmine.createIssue",
+ "parameters": {"featureInstanceId": "22222222-2222-2222-2222-222222222222"},
+ },
+ ],
+ "connections": [],
+ },
+ },
+ {
+ "pk": "wf-already-typed",
+ "graph": {
+ "nodes": [
+ {
+ "id": "n1",
+ "type": "trustee.processDocuments",
+ "parameters": {
+ "featureInstanceId": {
+ "$type": "FeatureInstanceRef",
+ "id": "33333333-3333-3333-3333-333333333333",
+ "featureCode": "trustee",
+ }
+ },
+ }
+ ],
+ "connections": [],
+ },
+ },
+ {
+ "pk": "wf-empty-graph",
+ "graph": {},
+ },
+ {
+ "pk": "wf-graph-as-string",
+ "graph": json.dumps({
+ "nodes": [
+ {
+ "id": "n1",
+ "type": "outlook.sendMail",
+ "parameters": {"featureInstanceId": "44444444-4444-4444-4444-444444444444"},
+ }
+ ],
+ "connections": [],
+ }),
+ },
+ ],
+ '"AutoVersion"': [
+ {
+ "pk": "ver-legacy",
+ "graph": {
+ "nodes": [
+ {
+ "id": "n1",
+ "type": "ai.runPrompt",
+ "parameters": {"featureInstanceId": "55555555-5555-5555-5555-555555555555"},
+ }
+ ],
+ "connections": [],
+ },
+ }
+ ],
+ }
+
+
+# ---------------------------------------------------------------------------
+# Helper-level tests
+# ---------------------------------------------------------------------------
+
+class TestLoadGraph:
+ def test_dictPassesThrough(self):
+ assert migrationModule._loadGraph({"a": 1}) == {"a": 1}
+
+ def test_jsonStringIsParsed(self):
+ assert migrationModule._loadGraph('{"a": 2}') == {"a": 2}
+
+ def test_emptyOrInvalidYieldsEmptyDict(self):
+ assert migrationModule._loadGraph(None) == {}
+ assert migrationModule._loadGraph("") == {}
+ assert migrationModule._loadGraph("not json") == {}
+
+ def test_bytesStringIsParsed(self):
+ assert migrationModule._loadGraph(b'{"a": 3}') == {"a": 3}
+
+
+class TestCountMigrations:
+ def test_zeroWhenIdentical(self):
+ g = {"nodes": [{"id": "n", "parameters": {"featureInstanceId": "uuid"}}]}
+ assert migrationModule._countMigrations(g, g) == 0
+
+ def test_countsMigratedFields(self):
+ before = {
+ "nodes": [
+ {"id": "n1", "parameters": {"featureInstanceId": "u1"}},
+ {"id": "n2", "parameters": {"featureInstanceId": "u2"}},
+ {"id": "n3", "parameters": {"featureInstanceId": "u3"}},
+ ]
+ }
+ after = {
+ "nodes": [
+ {
+ "id": "n1",
+ "parameters": {
+ "featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u1"}
+ },
+ },
+ {"id": "n2", "parameters": {"featureInstanceId": "u2"}},
+ {
+ "id": "n3",
+ "parameters": {
+ "featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u3"}
+ },
+ },
+ ]
+ }
+ assert migrationModule._countMigrations(before, after) == 2
+
+
+# ---------------------------------------------------------------------------
+# End-to-end migrate() tests
+# ---------------------------------------------------------------------------
+
+class TestMigrate:
+ def test_dryRunDoesNotWriteOrCommit(self, monkeypatch, graphsByTable):
+ conn = _FakeConn(graphsByTable)
+ monkeypatch.setattr(migrationModule, "_connect", lambda: conn)
+
+ summary = migrationModule.migrate(dryRun=True)
+
+ assert conn.updates == []
+ assert conn.committed is False
+ assert conn.closed is True
+ assert summary['"Automation2Workflow"']["scanned"] == 4
+ assert summary['"Automation2Workflow"']["rowsChanged"] == 2
+ assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3
+ assert summary['"AutoVersion"']["rowsChanged"] == 1
+ assert summary['"AutoVersion"']["fieldsRewritten"] == 1
+
+ def test_liveRunWritesAndCommits(self, monkeypatch, graphsByTable):
+ conn = _FakeConn(graphsByTable)
+ monkeypatch.setattr(migrationModule, "_connect", lambda: conn)
+
+ summary = migrationModule.migrate(dryRun=False)
+
+ assert conn.committed is True
+ assert conn.closed is True
+
+ updatesByPk = {pk: graph for (_table, pk, graph) in conn.updates}
+ assert set(updatesByPk.keys()) == {"wf-legacy", "wf-graph-as-string", "ver-legacy"}
+
+ legacyGraph = updatesByPk["wf-legacy"]
+ n1Param = legacyGraph["nodes"][0]["parameters"]["featureInstanceId"]
+ n2Param = legacyGraph["nodes"][1]["parameters"]["featureInstanceId"]
+ assert n1Param["$type"] == "FeatureInstanceRef"
+ assert n1Param["featureCode"] == "trustee"
+ assert n1Param["id"] == "11111111-1111-1111-1111-111111111111"
+ assert n2Param["featureCode"] == "redmine"
+
+ verParam = updatesByPk["ver-legacy"]["nodes"][0]["parameters"]["featureInstanceId"]
+ assert verParam["featureCode"] == "ai"
+
+ stringSourcedGraph = updatesByPk["wf-graph-as-string"]
+ outlookParam = stringSourcedGraph["nodes"][0]["parameters"]["featureInstanceId"]
+ assert outlookParam["featureCode"] == "outlook"
+
+ assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3
+ assert summary['"AutoVersion"']["fieldsRewritten"] == 1
+
+ def test_idempotency(self, monkeypatch, graphsByTable):
+ conn1 = _FakeConn(graphsByTable)
+ monkeypatch.setattr(migrationModule, "_connect", lambda: conn1)
+ migrationModule.migrate(dryRun=False)
+
+ firstUpdates = {pk: graph for (_t, pk, graph) in conn1.updates}
+ nextRows = {
+ '"Automation2Workflow"': [
+ {"pk": pk, "graph": graph}
+ for pk, graph in firstUpdates.items()
+ if pk.startswith("wf")
+ ],
+ '"AutoVersion"': [
+ {"pk": pk, "graph": graph}
+ for pk, graph in firstUpdates.items()
+ if pk.startswith("ver")
+ ],
+ }
+ conn2 = _FakeConn(nextRows)
+ monkeypatch.setattr(migrationModule, "_connect", lambda: conn2)
+ summary2 = migrationModule.migrate(dryRun=False)
+
+ assert conn2.updates == []
+ for table, counts in summary2.items():
+ assert counts["rowsChanged"] == 0, f"{table} not idempotent"
+ assert counts["fieldsRewritten"] == 0, f"{table} not idempotent"
diff --git a/tests/unit/serviceAgent/test_action_tool_adapter_typed.py b/tests/unit/serviceAgent/test_action_tool_adapter_typed.py
new file mode 100644
index 00000000..06edc01c
--- /dev/null
+++ b/tests/unit/serviceAgent/test_action_tool_adapter_typed.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Tests for the catalog-driven JSON-Schema generator in actionToolAdapter
+(Phase 3 of the Typed Action Architecture).
+
+Covers `_catalogTypeToJsonSchema` and `_convertParameterSchema` with:
+ - Primitives (str/int/bool/float/Any)
+ - Catalog object schemas (recursive expansion, required fields, enums)
+ - List[X] (array with typed items)
+ - Dict[K,V] (object with typed additionalProperties)
+ - Unknown type → safe fallback (string)
+"""
+from __future__ import annotations
+
+from modules.serviceCenter.services.serviceAgent.actionToolAdapter import (
+ _catalogTypeToJsonSchema,
+ _convertParameterSchema,
+)
+
+
+class TestPrimitives:
+ def test_str(self):
+ assert _catalogTypeToJsonSchema("str") == {"type": "string"}
+
+ def test_int(self):
+ assert _catalogTypeToJsonSchema("int") == {"type": "integer"}
+
+ def test_bool(self):
+ assert _catalogTypeToJsonSchema("bool") == {"type": "boolean"}
+
+ def test_float(self):
+ assert _catalogTypeToJsonSchema("float") == {"type": "number"}
+
+ def test_anyHasNoTypeField(self):
+ # JSON Schema "any" is best expressed as an empty schema.
+ assert _catalogTypeToJsonSchema("Any") == {}
+
+
+class TestContainers:
+ def test_listOfPrimitive(self):
+ assert _catalogTypeToJsonSchema("List[str]") == {
+ "type": "array",
+ "items": {"type": "string"},
+ }
+
+ def test_listOfCatalogSchema(self):
+ out = _catalogTypeToJsonSchema("List[Document]")
+ assert out["type"] == "array"
+ assert out["items"]["type"] == "object"
+        # Recursive expansion surfaces the Document fields:
+ propsName = out["items"]["properties"].get("name", {})
+ assert propsName.get("type") == "string"
+
+ def test_dictWithPrimitiveValue(self):
+ assert _catalogTypeToJsonSchema("Dict[str,Any]") == {
+ "type": "object",
+ "additionalProperties": {},
+ }
+
+ def test_dictWithCatalogValue(self):
+ out = _catalogTypeToJsonSchema("Dict[str,Document]")
+ assert out["type"] == "object"
+ assert out["additionalProperties"]["type"] == "object"
+ assert "properties" in out["additionalProperties"]
+
+
+class TestCatalogObjects:
+ def test_connectionRefExpands(self):
+ out = _catalogTypeToJsonSchema("ConnectionRef")
+ assert out["type"] == "object"
+ assert "properties" in out
+ # ConnectionRef has 'id' (required) and 'authority' (required, discriminator)
+ assert "id" in out["properties"]
+ assert "authority" in out["properties"]
+ assert "id" in out["required"]
+ assert "authority" in out["required"]
+
+ def test_featureInstanceRefExpands(self):
+ out = _catalogTypeToJsonSchema("FeatureInstanceRef")
+ assert out["type"] == "object"
+ # mandateId is optional → must NOT be in required
+ assert "mandateId" not in out.get("required", [])
+ assert "id" in out["required"]
+
+ def test_descriptionPreserved(self):
+ out = _catalogTypeToJsonSchema("ConnectionRef")
+ assert "description" in out
+ assert "ConnectionRef" in out["description"]
+
+
+class TestUnknownFallback:
+ def test_unknownDefaultsToString(self):
+ out = _catalogTypeToJsonSchema("CompletelyUnknownType")
+ assert out["type"] == "string"
+ assert "unknown" in out.get("description", "").lower()
+
+ def test_emptyStringDefaultsToString(self):
+ assert _catalogTypeToJsonSchema("") == {"type": "string"}
+
+
+class TestConvertParameterSchema:
+ def test_buildsObjectSchemaWithRequiredList(self):
+ actionParams = {
+ "documentList": {"type": "DocumentList", "required": True, "description": "Eingabe"},
+ "prompt": {"type": "str", "required": False, "description": "Prompt-Text"},
+ }
+ schema = _convertParameterSchema(actionParams)
+ assert schema["type"] == "object"
+ assert "documentList" in schema["properties"]
+ assert "prompt" in schema["properties"]
+ assert schema["required"] == ["documentList"]
+ assert schema["properties"]["documentList"]["description"] == "Eingabe"
+ # documentList expands to an object schema (DocumentList is a catalog object)
+ assert schema["properties"]["documentList"]["type"] == "object"
+
+ def test_handlesMalformedParamsGracefully(self):
+ actionParams = {"weird": "not-a-dict"}
+ schema = _convertParameterSchema(actionParams)
+ assert schema["properties"]["weird"]["type"] == "string"
+
+ def test_typedRefProducesObjectNotString(self):
+ """Regression: pre-Phase-3 behaviour collapsed catalog refs to 'string'."""
+ actionParams = {"connection": {"type": "ConnectionRef", "required": True}}
+ schema = _convertParameterSchema(actionParams)
+ assert schema["properties"]["connection"]["type"] == "object"
+ assert "id" in schema["properties"]["connection"]["properties"]
diff --git a/tests/unit/teamsbot/__init__.py b/tests/unit/teamsbot/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/teamsbot/test_directorPrompts.py b/tests/unit/teamsbot/test_directorPrompts.py
new file mode 100644
index 00000000..f136438a
--- /dev/null
+++ b/tests/unit/teamsbot/test_directorPrompts.py
@@ -0,0 +1,604 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Unit tests for Teamsbot Director Prompts (Plan #5).
+
+Covers:
+* Datamodel limits, defaults, enum-validation
+* SpeechTeamsResponse needsAgent / agentReason fields
+* TeamsbotService._buildPersistentDirectorContext rendering
+* TeamsbotService.submitDirectorPrompt: queues, emits SSE event, returns created
+* TeamsbotService._processDirectorPrompt lifecycle:
+ queued -> running -> succeeded/consumed (one-shot vs persistent)
+* TeamsbotService._processDirectorPrompt failure path drops persistent prompt
+* TeamsbotService.removePersistentPrompt
+* getActiveService / _activeServices registry
+* TeamsbotObjects.getActivePersistentPrompts filtering
+
+The TeamsbotService constructor instantiates BrowserBotConnector, which is
+harmless (no network until joinMeeting). All DB / agent / SSE side-effects
+are stubbed via monkeypatch.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any, Dict, List, Optional
+from unittest.mock import MagicMock
+
+import pytest
+from pydantic import ValidationError
+
+from modules.features.teamsbot import service as serviceModule
+from modules.features.teamsbot.datamodelTeamsbot import (
+ DIRECTOR_PROMPT_FILE_LIMIT,
+ DIRECTOR_PROMPT_TEXT_LIMIT,
+ SpeechTeamsResponse,
+ TeamsbotConfig,
+ TeamsbotDirectorPrompt,
+ TeamsbotDirectorPromptCreateRequest,
+ TeamsbotDirectorPromptMode,
+ TeamsbotDirectorPromptStatus,
+)
+from modules.features.teamsbot.service import (
+ TeamsbotService,
+ _activeServices,
+ _sessionEvents,
+ getActiveService,
+)
+
+
+# ============================================================================
+# Helpers
+# ============================================================================
+
+class _FakeUser:
+ """Minimal stand-in for modules.datamodels.datamodelUam.User used by the
+ service layer. TeamsbotService only needs ``id`` for logging / interface
+ keying."""
+
+ def __init__(self, userId: str = "user-op-1") -> None:
+ self.id = userId
+
+
+class _FakeInterface:
+ """In-memory stand-in for TeamsbotObjects (only the director-prompt API).
+
+ Behaves like the real DB interface for the calls used by the service:
+ ``createDirectorPrompt``, ``updateDirectorPrompt``, ``getDirectorPrompt``,
+ ``getActivePersistentPrompts``, ``getActiveSystemBot``.
+ """
+
+ def __init__(self) -> None:
+ self.prompts: Dict[str, Dict[str, Any]] = {}
+ self.created: List[Dict[str, Any]] = []
+ self.updates: List[Dict[str, Any]] = []
+ self.deleted: List[str] = []
+
+ def createDirectorPrompt(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ record = dict(data)
+ if "id" not in record:
+ record["id"] = f"prompt-{len(self.prompts)+1}"
+ self.prompts[record["id"]] = record
+ self.created.append(record)
+ return record
+
+ def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+ if promptId not in self.prompts:
+ return None
+ self.prompts[promptId].update(updates)
+ self.updates.append({"id": promptId, **updates})
+ return self.prompts[promptId]
+
+ def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]:
+ return self.prompts.get(promptId)
+
+ def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]:
+ terminal = {
+ TeamsbotDirectorPromptStatus.CONSUMED.value,
+ TeamsbotDirectorPromptStatus.FAILED.value,
+ }
+ return [
+ p
+ for p in self.prompts.values()
+ if p.get("sessionId") == sessionId
+ and p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value
+ and p.get("status") not in terminal
+ ]
+
+ def getActiveSystemBot(self, mandateId: str) -> Optional[Dict[str, Any]]:
+ return None
+
+
+class _CapturedEvents(list):
+ """Helper to collect SSE events emitted by ``_emitSessionEvent``."""
+
+ async def append_event(self, sessionId: str, eventType: str, data: Any) -> None:
+ self.append({"sessionId": sessionId, "type": eventType, "data": data})
+
+
+def _patchInterface(monkeypatch: pytest.MonkeyPatch, fakeInterface: _FakeInterface) -> None:
+ """Replace ``getInterface`` in the teamsbot service module so the service
+ talks to our in-memory fake instead of PostgreSQL."""
+ from modules.features.teamsbot import interfaceFeatureTeamsbot as interfaceDb
+
+ monkeypatch.setattr(interfaceDb, "getInterface", lambda *args, **kwargs: fakeInterface)
+
+
+def _patchEmit(monkeypatch: pytest.MonkeyPatch) -> _CapturedEvents:
+ captured = _CapturedEvents()
+
+ async def _stubEmit(sessionId: str, eventType: str, data: Any) -> None:
+ await captured.append_event(sessionId, eventType, data)
+
+ monkeypatch.setattr(serviceModule, "_emitSessionEvent", _stubEmit)
+ return captured
+
+
+def _buildService() -> TeamsbotService:
+ """Build a TeamsbotService with a minimal config. BrowserBotConnector is
+ instantiated but never reached in these tests."""
+ config = TeamsbotConfig(botName="UnitTest Bot")
+ svc = TeamsbotService(
+ currentUser=_FakeUser(),
+ mandateId="mandate-x",
+ instanceId="instance-y",
+ config=config,
+ )
+ svc._activeSessionId = "session-1"
+ return svc
+
+
+@pytest.fixture(autouse=True)
+def _resetGlobals():
+ """Avoid cross-test bleed in module-level globals."""
+ _activeServices.clear()
+ _sessionEvents.clear()
+ yield
+ _activeServices.clear()
+ _sessionEvents.clear()
+
+
+# ============================================================================
+# 1) Datamodel
+# ============================================================================
+
+class TestDirectorPromptDatamodel:
+ def test_directorPromptDefaults(self):
+ prompt = TeamsbotDirectorPrompt(
+ sessionId="s1",
+ instanceId="i1",
+ operatorUserId="u1",
+ text="Hello world",
+ )
+ assert prompt.mode == TeamsbotDirectorPromptMode.ONE_SHOT
+ assert prompt.status == TeamsbotDirectorPromptStatus.QUEUED
+ assert prompt.fileIds == []
+ assert prompt.consumedAt is None
+ assert prompt.responseText is None
+ assert prompt.id # uuid auto-filled
+ assert prompt.createdAt # iso timestamp auto-filled
+
+ def test_directorPromptTextLimitEnforced(self):
+ with pytest.raises(ValidationError):
+ TeamsbotDirectorPrompt(
+ sessionId="s1",
+ instanceId="i1",
+ operatorUserId="u1",
+ text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1),
+ )
+
+ def test_directorPromptCreateRequestDefaults(self):
+ body = TeamsbotDirectorPromptCreateRequest(text="quick prompt")
+ assert body.mode == TeamsbotDirectorPromptMode.ONE_SHOT
+ assert body.fileIds == []
+
+ def test_directorPromptCreateRequestEmptyTextRejected(self):
+ with pytest.raises(ValidationError):
+ TeamsbotDirectorPromptCreateRequest(text="")
+
+ def test_directorPromptCreateRequestTooLongRejected(self):
+ with pytest.raises(ValidationError):
+ TeamsbotDirectorPromptCreateRequest(text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1))
+
+ def test_directorPromptStatusEnum(self):
+ assert TeamsbotDirectorPromptStatus.QUEUED.value == "queued"
+ assert TeamsbotDirectorPromptStatus.RUNNING.value == "running"
+ assert TeamsbotDirectorPromptStatus.SUCCEEDED.value == "succeeded"
+ assert TeamsbotDirectorPromptStatus.CONSUMED.value == "consumed"
+ assert TeamsbotDirectorPromptStatus.FAILED.value == "failed"
+
+ def test_directorPromptModeEnum(self):
+ assert TeamsbotDirectorPromptMode.ONE_SHOT.value == "oneShot"
+ assert TeamsbotDirectorPromptMode.PERSISTENT.value == "persistent"
+
+ def test_fileLimitConstantHasSaneValue(self):
+ assert DIRECTOR_PROMPT_FILE_LIMIT == 10
+ assert DIRECTOR_PROMPT_TEXT_LIMIT == 8000
+
+
+class TestSpeechTeamsResponseHybrid:
+ def test_needsAgentDefaultFalse(self):
+ resp = SpeechTeamsResponse(shouldRespond=False)
+ assert resp.needsAgent is False
+ assert resp.agentReason is None
+
+ def test_needsAgentEscalation(self):
+ resp = SpeechTeamsResponse(
+ shouldRespond=True,
+ responseText="Moment, ich recherchiere.",
+ needsAgent=True,
+ agentReason="webSearch SBB Schweiz",
+ detectedIntent="addressed",
+ )
+ assert resp.needsAgent is True
+ assert resp.agentReason == "webSearch SBB Schweiz"
+
+
+# ============================================================================
+# 2) Persistent Director Context Renderer
+# ============================================================================
+
+class TestBuildPersistentDirectorContext:
+ def test_emptyWhenNoPrompts(self):
+ svc = _buildService()
+ svc._activePersistentPrompts = []
+ assert svc._buildPersistentDirectorContext() == ""
+
+ def test_singlePrompt(self):
+ svc = _buildService()
+ svc._activePersistentPrompts = [
+ {"id": "p1", "text": "Antworte immer in Englisch."},
+ ]
+ rendered = svc._buildPersistentDirectorContext()
+ assert "OPERATOR_DIRECTIVES" in rendered
+ assert "- Antworte immer in Englisch." in rendered
+ assert "private" in rendered
+
+ def test_skipsBlankText(self):
+ svc = _buildService()
+ svc._activePersistentPrompts = [
+ {"id": "p1", "text": " "},
+ {"id": "p2", "text": "Sei hoeflich."},
+ ]
+ rendered = svc._buildPersistentDirectorContext()
+ assert "- Sei hoeflich." in rendered
+ assert "p1" not in rendered # the blank one is filtered out
+
+ def test_allBlankPromptsResultInEmpty(self):
+ svc = _buildService()
+ svc._activePersistentPrompts = [
+ {"id": "p1", "text": ""},
+ {"id": "p2", "text": " "},
+ ]
+ assert svc._buildPersistentDirectorContext() == ""
+
+
+# ============================================================================
+# 3) submitDirectorPrompt
+# ============================================================================
+
+class TestSubmitDirectorPrompt:
+ @pytest.mark.asyncio
+ async def test_oneShotQueuesAndEmits(self, monkeypatch):
+ fake = _FakeInterface()
+ events = _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ # Block the auto-process task from running, otherwise it would call
+ # the real agent service. We replace the coroutine factory.
+ async def _noProcess(prompt):
+ return None
+
+ svc = _buildService()
+ monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess)
+
+ created = await svc.submitDirectorPrompt(
+ sessionId="session-1",
+ operatorUserId="user-op-1",
+ text="Recherchier das im Internet.",
+ mode=TeamsbotDirectorPromptMode.ONE_SHOT,
+ fileIds=[],
+ )
+
+ assert created["status"] == TeamsbotDirectorPromptStatus.QUEUED.value
+ assert created["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value
+ assert created["text"] == "Recherchier das im Internet."
+ assert created["sessionId"] == "session-1"
+ assert created["instanceId"] == "instance-y"
+ assert created["operatorUserId"] == "user-op-1"
+
+ # SSE event with the queued lifecycle marker
+ assert any(
+ e["type"] == "directorPrompt"
+ and e["data"]["status"] == TeamsbotDirectorPromptStatus.QUEUED.value
+ and e["data"]["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value
+ for e in events
+ )
+
+ # In-memory persistent registry remains empty for one-shot.
+ assert svc._activePersistentPrompts == []
+
+ # Allow the (no-op) background task to settle so the loop is clean.
+ await asyncio.sleep(0)
+
+ @pytest.mark.asyncio
+ async def test_persistentPromptAppendsToInMemoryRegistry(self, monkeypatch):
+ fake = _FakeInterface()
+ _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ async def _noProcess(prompt):
+ return None
+
+ svc = _buildService()
+ monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess)
+
+ created = await svc.submitDirectorPrompt(
+ sessionId="session-1",
+ operatorUserId="user-op-1",
+ text="Antworte immer in Englisch.",
+ mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ fileIds=["file-a", "file-b"],
+ )
+
+ assert created["mode"] == TeamsbotDirectorPromptMode.PERSISTENT.value
+ assert created["fileIds"] == ["file-a", "file-b"]
+ assert len(svc._activePersistentPrompts) == 1
+ assert svc._activePersistentPrompts[0]["id"] == created["id"]
+
+ await asyncio.sleep(0)
+
+
+# ============================================================================
+# 4) _processDirectorPrompt lifecycle
+# ============================================================================
+
+class TestProcessDirectorPromptLifecycle:
+ @pytest.mark.asyncio
+ async def test_oneShotSuccessTransitionsRunningThenConsumed(self, monkeypatch):
+ fake = _FakeInterface()
+ prompt = fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ id="prompt-success-1",
+ sessionId="session-1",
+ instanceId="instance-y",
+ operatorUserId="user-op-1",
+ text="Was ist die Hauptstadt von Frankreich?",
+ mode=TeamsbotDirectorPromptMode.ONE_SHOT,
+ ).model_dump()
+ )
+ events = _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ svc = _buildService()
+
+ async def _stubAgent(**kwargs):
+ return "Paris."
+
+ monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent)
+
+ await svc._processDirectorPrompt(prompt)
+
+ statuses = [u.get("status") for u in fake.updates if u["id"] == prompt["id"]]
+ assert TeamsbotDirectorPromptStatus.RUNNING.value in statuses
+ assert TeamsbotDirectorPromptStatus.CONSUMED.value in statuses
+
+ final = fake.prompts[prompt["id"]]
+ assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value
+ assert final["responseText"] == "Paris."
+ assert final.get("consumedAt")
+
+ emittedStatuses = [
+ e["data"].get("status") for e in events if e["type"] == "directorPrompt"
+ ]
+ assert TeamsbotDirectorPromptStatus.RUNNING.value in emittedStatuses
+ assert TeamsbotDirectorPromptStatus.CONSUMED.value in emittedStatuses
+
+ @pytest.mark.asyncio
+ async def test_persistentSuccessStaysSucceededNotConsumed(self, monkeypatch):
+ fake = _FakeInterface()
+ prompt = fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ id="prompt-persist-1",
+ sessionId="session-1",
+ instanceId="instance-y",
+ operatorUserId="user-op-1",
+ text="Antworte immer in Englisch.",
+ mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ ).model_dump()
+ )
+ _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ svc = _buildService()
+
+ async def _stubAgent(**kwargs):
+ return "Acknowledged."
+
+ monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent)
+
+ await svc._processDirectorPrompt(prompt)
+
+ final = fake.prompts[prompt["id"]]
+ assert final["status"] == TeamsbotDirectorPromptStatus.SUCCEEDED.value
+ assert final["responseText"] == "Acknowledged."
+ # Persistent prompts must stay alive beyond the run.
+ assert final.get("consumedAt") is None
+
+ @pytest.mark.asyncio
+ async def test_failureMarksFailedAndDropsFromActivePersistent(self, monkeypatch):
+ fake = _FakeInterface()
+ prompt = fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ id="prompt-fail-1",
+ sessionId="session-1",
+ instanceId="instance-y",
+ operatorUserId="user-op-1",
+ text="Mach was Komplexes.",
+ mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ ).model_dump()
+ )
+ events = _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ svc = _buildService()
+ svc._activePersistentPrompts = [prompt]
+
+ async def _stubAgentBoom(**kwargs):
+ raise RuntimeError("agent down")
+
+ monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgentBoom)
+
+ await svc._processDirectorPrompt(prompt)
+
+ final = fake.prompts[prompt["id"]]
+ assert final["status"] == TeamsbotDirectorPromptStatus.FAILED.value
+ assert "RuntimeError" in (final.get("statusMessage") or "")
+
+ # The failed persistent prompt is removed from the in-memory directives.
+ assert all(p["id"] != prompt["id"] for p in svc._activePersistentPrompts)
+
+ emittedStatuses = [
+ e["data"].get("status") for e in events if e["type"] == "directorPrompt"
+ ]
+ assert TeamsbotDirectorPromptStatus.FAILED.value in emittedStatuses
+
+
+# ============================================================================
+# 5) removePersistentPrompt
+# ============================================================================
+
+class TestRemovePersistentPrompt:
+ @pytest.mark.asyncio
+ async def test_removePersistentPromptMarksConsumedAndDrops(self, monkeypatch):
+ fake = _FakeInterface()
+ prompt = fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ id="prompt-rm-1",
+ sessionId="session-1",
+ instanceId="instance-y",
+ operatorUserId="user-op-1",
+ text="Bleib hoeflich.",
+ mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ status=TeamsbotDirectorPromptStatus.SUCCEEDED,
+ ).model_dump()
+ )
+ events = _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+
+ svc = _buildService()
+ svc._activePersistentPrompts = [prompt]
+
+ ok = await svc.removePersistentPrompt(prompt["id"])
+ assert ok is True
+
+ final = fake.prompts[prompt["id"]]
+ assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value
+ assert final.get("consumedAt")
+ assert final.get("statusMessage") == "Removed by operator"
+ assert svc._activePersistentPrompts == []
+
+ assert any(
+ e["type"] == "directorPrompt"
+ and e["data"].get("removed") is True
+ and e["data"].get("status") == TeamsbotDirectorPromptStatus.CONSUMED.value
+ for e in events
+ )
+
+ @pytest.mark.asyncio
+ async def test_removeUnknownPromptReturnsFalse(self, monkeypatch):
+ fake = _FakeInterface()
+ _patchEmit(monkeypatch)
+ _patchInterface(monkeypatch, fake)
+ svc = _buildService()
+ ok = await svc.removePersistentPrompt("unknown-id")
+ assert ok is False
+
+
+# ============================================================================
+# 6) _activeServices Registry
+# ============================================================================
+
+class TestActiveServicesRegistry:
+ def test_getActiveServiceReturnsNoneByDefault(self):
+ assert getActiveService("not-active") is None
+
+ def test_getActiveServiceReturnsRegistered(self):
+ svc = _buildService()
+ _activeServices["session-XYZ"] = svc
+ assert getActiveService("session-XYZ") is svc
+
+ def test_distinctSessionsMapToDistinctServices(self):
+ a = _buildService()
+ b = _buildService()
+ _activeServices["s1"] = a
+ _activeServices["s2"] = b
+ assert getActiveService("s1") is a
+ assert getActiveService("s2") is b
+ assert getActiveService("s1") is not getActiveService("s2")
+
+
+# ============================================================================
+# 7) Interface-level filtering for active persistent prompts
+# ============================================================================
+
+class TestGetActivePersistentPromptsFiltering:
+ """The interface-level helper is the source of truth for what gets
+ re-loaded into _activePersistentPrompts on (re)connect."""
+
+ def test_onlyPersistentNonTerminal(self):
+ fake = _FakeInterface()
+        # Every lifecycle state (all five) for the same session
+ for status in TeamsbotDirectorPromptStatus:
+ fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ sessionId="s1",
+ instanceId="i1",
+ operatorUserId="u1",
+ text=f"persist-{status.value}",
+ mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ status=status,
+ ).model_dump()
+ )
+        # A one-shot prompt must never show up as an active persistent
+        # prompt, even while it is still running.
+ fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ sessionId="s1",
+ instanceId="i1",
+ operatorUserId="u1",
+ text="oneShot-running",
+ mode=TeamsbotDirectorPromptMode.ONE_SHOT,
+ status=TeamsbotDirectorPromptStatus.RUNNING,
+ ).model_dump()
+ )
+
+ active = fake.getActivePersistentPrompts("s1")
+ statuses = {p.get("status") for p in active}
+
+ # CONSUMED and FAILED are terminal; ONE_SHOT is not persistent.
+ assert TeamsbotDirectorPromptStatus.CONSUMED.value not in statuses
+ assert TeamsbotDirectorPromptStatus.FAILED.value not in statuses
+ # All returned prompts are persistent
+ assert all(
+ p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value for p in active
+ )
+ # Non-terminal persistent: QUEUED, RUNNING, SUCCEEDED -> 3 records
+ assert len(active) == 3
+
+ def test_filtersBySession(self):
+ fake = _FakeInterface()
+ fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ sessionId="s1", instanceId="i1", operatorUserId="u1",
+ text="A", mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ ).model_dump()
+ )
+ fake.createDirectorPrompt(
+ TeamsbotDirectorPrompt(
+ sessionId="s2", instanceId="i1", operatorUserId="u1",
+ text="B", mode=TeamsbotDirectorPromptMode.PERSISTENT,
+ ).model_dump()
+ )
+ assert len(fake.getActivePersistentPrompts("s1")) == 1
+ assert len(fake.getActivePersistentPrompts("s2")) == 1
+ assert fake.getActivePersistentPrompts("ghost") == []
diff --git a/tests/unit/workflow/test_phase3_context_node.py b/tests/unit/workflow/test_phase3_context_node.py
index 300d861f..7172c6e7 100644
--- a/tests/unit/workflow/test_phase3_context_node.py
+++ b/tests/unit/workflow/test_phase3_context_node.py
@@ -2,12 +2,11 @@
import pytest
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
-from modules.features.graphicalEditor.portTypes import (
- PORT_TYPE_CATALOG,
- INPUT_EXTRACTORS,
- _extractUdmDocument,
- _extractUdmNodeList,
- _extractConsolidateResult,
+from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
+from modules.workflows.automation2.udmUpstreamShapes import (
+ _coerceConsolidateResultInput,
+ _coerceUdmDocumentInput,
+ _coerceUdmNodeListInput,
)
@@ -32,34 +31,28 @@ def test_udm_port_types_registered():
assert "ConsolidateResult" in PORT_TYPE_CATALOG
-def test_udm_extractors_registered():
- assert "UdmDocument" in INPUT_EXTRACTORS
- assert "UdmNodeList" in INPUT_EXTRACTORS
- assert "ConsolidateResult" in INPUT_EXTRACTORS
-
-
-def test_extractUdmDocument_from_direct():
+def test_coerceUdmDocument_from_direct():
upstream = {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}
- result = _extractUdmDocument(upstream)
+ result = _coerceUdmDocumentInput(upstream)
assert result["sourceType"] == "pdf"
-def test_extractUdmDocument_from_nested():
+def test_coerceUdmDocument_from_nested():
upstream = {"udm": {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}, "other": 1}
- result = _extractUdmDocument(upstream)
+ result = _coerceUdmDocumentInput(upstream)
assert result["sourceType"] == "pdf"
-def test_extractUdmNodeList():
+def test_coerceUdmNodeList():
upstream = {"nodes": [{"id": "n1"}, {"id": "n2"}], "count": 2}
- result = _extractUdmNodeList(upstream)
+ result = _coerceUdmNodeListInput(upstream)
assert result["count"] == 2
assert len(result["nodes"]) == 2
-def test_extractConsolidateResult():
+def test_coerceConsolidateResult():
upstream = {"result": {"headers": [], "rows": []}, "mode": "table", "count": 3}
- result = _extractConsolidateResult(upstream)
+ result = _coerceConsolidateResultInput(upstream)
assert result["mode"] == "table"
assert result["count"] == 3
diff --git a/tests/unit/workflows/test_automation2_graphUtils.py b/tests/unit/workflows/test_automation2_graphUtils.py
index 78077987..ff5df2cc 100644
--- a/tests/unit/workflows/test_automation2_graphUtils.py
+++ b/tests/unit/workflows/test_automation2_graphUtils.py
@@ -65,3 +65,102 @@ class TestResolveParameterReferences:
node_outputs = {"n1": {"country": "CH"}}
value = "Land: {{n1.country}}"
assert resolveParameterReferences(value, node_outputs) == "Land: CH"
+
+
+class TestWildcardIteration:
+    """Phase-4 typed bindings resolver: a ``*`` path segment iterates over a list.
+
+ Path semantics:
+ ["docs", "*", "name"] ⇒ map "name" over each item in docs
+ ["docs", "*"] ⇒ the docs list itself (after passing through *)
+ Drops items whose remainder resolves to ``None`` (missing field).
+ """
+
+ def test_wildcard_maps_over_list_to_field(self):
+ node_outputs = {
+ "src": {
+ "documents": [
+ {"name": "a.pdf", "size": 10},
+ {"name": "b.pdf", "size": 20},
+ ],
+ }
+ }
+ value = {
+ "type": "ref",
+ "nodeId": "src",
+ "path": ["documents", "*", "name"],
+ }
+ assert resolveParameterReferences(value, node_outputs) == ["a.pdf", "b.pdf"]
+
+ def test_wildcard_terminal_returns_list_copy(self):
+ node_outputs = {"src": {"items": ["x", "y", "z"]}}
+ value = {"type": "ref", "nodeId": "src", "path": ["items", "*"]}
+ assert resolveParameterReferences(value, node_outputs) == ["x", "y", "z"]
+
+ def test_wildcard_drops_missing_fields(self):
+ node_outputs = {
+ "src": {
+ "rows": [
+ {"name": "a"},
+ {"otherField": 1},
+ {"name": "c"},
+ ]
+ }
+ }
+ value = {"type": "ref", "nodeId": "src", "path": ["rows", "*", "name"]}
+ assert resolveParameterReferences(value, node_outputs) == ["a", "c"]
+
+ def test_wildcard_on_non_list_returns_none(self):
+ node_outputs = {"src": {"docs": {"not": "a list"}}}
+ value = {"type": "ref", "nodeId": "src", "path": ["docs", "*", "name"]}
+ assert resolveParameterReferences(value, node_outputs) is None
+
+ def test_wildcard_nested(self):
+ node_outputs = {
+ "src": {
+ "groups": [
+ {"items": [{"v": 1}, {"v": 2}]},
+ {"items": [{"v": 3}]},
+ ]
+ }
+ }
+ value = {
+ "type": "ref",
+ "nodeId": "src",
+ "path": ["groups", "*", "items", "*", "v"],
+ }
+ assert resolveParameterReferences(value, node_outputs) == [[1, 2], [3]]
+
+ def test_wildcard_inside_transit_envelope(self):
+ node_outputs = {
+ "src": {
+ "_transit": True,
+ "data": {"documents": [{"name": "p.pdf"}, {"name": "q.pdf"}]},
+ }
+ }
+ value = {
+ "type": "ref",
+ "nodeId": "src",
+ "path": ["documents", "*", "name"],
+ }
+ assert resolveParameterReferences(value, node_outputs) == ["p.pdf", "q.pdf"]
+
+
+class TestPathContainsWildcard:
+ """``_pathContainsWildcard`` lets the engine decide between a scalar bind
+ and an iteration target (e.g. wrap a Loop container around the consumer).
+ """
+
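+    # A plausible implementation, assumed for orientation only; the tests
+    # below pin down the exact contract:
+    #
+    #     def _pathContainsWildcard(path) -> bool:
+    #         return any(seg == "*" for seg in path)
+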
+ def test_detects_wildcard(self):
+ from modules.workflows.automation2.graphUtils import _pathContainsWildcard
+ assert _pathContainsWildcard(["docs", "*", "name"]) is True
+ assert _pathContainsWildcard(["*"]) is True
+
+ def test_no_wildcard(self):
+ from modules.workflows.automation2.graphUtils import _pathContainsWildcard
+ assert _pathContainsWildcard(["docs", 0, "name"]) is False
+ assert _pathContainsWildcard([]) is False
+
+    def test_int_only_segments_do_not_match(self):
+ from modules.workflows.automation2.graphUtils import _pathContainsWildcard
+ assert _pathContainsWildcard([1, 2, 3]) is False
diff --git a/tests/unit/workflows/test_featureInstanceRefMigration.py b/tests/unit/workflows/test_featureInstanceRefMigration.py
new file mode 100644
index 00000000..573f7b66
--- /dev/null
+++ b/tests/unit/workflows/test_featureInstanceRefMigration.py
@@ -0,0 +1,310 @@
+# Copyright (c) 2025 Patrick Motsch
+"""
+Phase-5, layer 4: unit tests for ``materializeFeatureInstanceRefs`` and the
+runtime envelope unwrap in ``graphUtils.resolveParameterReferences``.
+
+Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md`` (T11).
+"""
+from __future__ import annotations
+
+import copy
+
+import pytest
+
+from modules.workflows.automation2.featureInstanceRefMigration import (
+ materializeFeatureInstanceRefs,
+)
+from modules.workflows.automation2.graphUtils import (
+ _isTypedRefEnvelope,
+ _unwrapTypedRef,
+ resolveParameterReferences,
+)
+
+
+# ---------------------------------------------------------------------------
+# Migration: raw UUID -> typed envelope
+# ---------------------------------------------------------------------------
+
+
+class TestMaterializeFeatureInstanceRefs:
+ def test_emptyGraphIsReturnedAsIs(self):
+ out = materializeFeatureInstanceRefs({})
+ assert out == {}
+
+ def test_nonDictInputIsPassthrough(self):
+ # Defensive: callers may pass a None / list by accident.
+ assert materializeFeatureInstanceRefs(None) is None
+ assert materializeFeatureInstanceRefs([]) == []
+
+ def test_graphWithoutFeatureInstanceIdIsUnchanged(self):
+ graph = {"nodes": [{"id": "n1", "type": "trigger.manual", "parameters": {}}]}
+ original = copy.deepcopy(graph)
+ out = materializeFeatureInstanceRefs(graph)
+ assert out == original
+
+ def test_inputIsNotMutated(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": "abc-123"},
+ }
+ ]
+ }
+ snapshot = copy.deepcopy(graph)
+ materializeFeatureInstanceRefs(graph)
+ assert graph == snapshot
+
+ def test_rawUuidIsConvertedToEnvelope(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": "abc-123"},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ param = out["nodes"][0]["parameters"]["featureInstanceId"]
+ assert param == {
+ "$type": "FeatureInstanceRef",
+ "id": "abc-123",
+ "featureCode": "trustee",
+ }
+
+    def test_rawUuidWhitespaceIsTrimmed(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": " abc-123 "},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ assert out["nodes"][0]["parameters"]["featureInstanceId"]["id"] == "abc-123"
+
+ def test_emptyStringIsLeftUntouched(self):
+ # Empty featureInstanceId is the editor placeholder for "not yet bound";
+ # the migration must NOT pretend an empty value is a real UUID.
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": ""},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ assert out["nodes"][0]["parameters"]["featureInstanceId"] == ""
+
+ def test_alreadyTypedEnvelopeIsIdempotent(self):
+ envelope = {
+ "$type": "FeatureInstanceRef",
+ "id": "abc-123",
+ "featureCode": "trustee",
+ }
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": envelope},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ assert out["nodes"][0]["parameters"]["featureInstanceId"] == envelope
+
+ def test_runMigrationTwiceProducesSameResult(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": "abc-123"},
+ }
+ ]
+ }
+ once = materializeFeatureInstanceRefs(graph)
+ twice = materializeFeatureInstanceRefs(once)
+ assert once == twice
+
+ @pytest.mark.parametrize(
+ "nodeType,expectedFeatureCode",
+ [
+ ("trustee.extractFromFiles", "trustee"),
+ ("trustee.processDocuments", "trustee"),
+ ("redmine.createIssue", "redmine"),
+ ("clickup.createTask", "clickup"),
+ ("sharepoint.listFiles", "sharepoint"),
+ ("outlook.readEmails", "outlook"),
+ ("email.searchEmail", "outlook"),
+ ],
+ )
+ def test_featureCodeIsDerivedFromNodeTypePrefix(
+ self, nodeType, expectedFeatureCode
+ ):
+ graph = {
+ "nodes": [
+ {
+ "id": "n",
+ "type": nodeType,
+ "parameters": {"featureInstanceId": "uuid-x"},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ env = out["nodes"][0]["parameters"]["featureInstanceId"]
+ assert env["featureCode"] == expectedFeatureCode
+
+ def test_unknownNodeTypePrefixOmitsFeatureCode(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n",
+ "type": "weird.unknown.action",
+ "parameters": {"featureInstanceId": "uuid-x"},
+ }
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ env = out["nodes"][0]["parameters"]["featureInstanceId"]
+ assert env == {"$type": "FeatureInstanceRef", "id": "uuid-x"}
+
+ def test_multipleNodesAreAllMigrated(self):
+ graph = {
+ "nodes": [
+ {
+ "id": "n5",
+ "type": "trustee.extractFromFiles",
+ "parameters": {"featureInstanceId": "uuid-1"},
+ },
+ {
+ "id": "n6",
+ "type": "trustee.queryData",
+ "parameters": {"featureInstanceId": "uuid-2"},
+ },
+ {
+ "id": "n9",
+ "type": "trustee.processDocuments",
+ "parameters": {"featureInstanceId": "uuid-3"},
+ },
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ ids = [n["parameters"]["featureInstanceId"]["id"] for n in out["nodes"]]
+ assert ids == ["uuid-1", "uuid-2", "uuid-3"]
+
+ def test_nodesWithoutParametersAreSkipped(self):
+ graph = {
+ "nodes": [
+ {"id": "n1", "type": "trigger.manual"},
+ {"id": "n2", "type": "trustee.queryData"}, # no parameters key
+ {
+ "id": "n3",
+ "type": "trustee.processDocuments",
+ "parameters": None,
+ },
+ ]
+ }
+ out = materializeFeatureInstanceRefs(graph)
+ assert out == graph
+
+
+# ---------------------------------------------------------------------------
+# Runtime envelope unwrap (graphUtils._unwrapTypedRef + resolveParameterReferences)
+# ---------------------------------------------------------------------------
+
+
+class TestIsTypedRefEnvelope:
+ def test_recognisesFeatureInstanceRef(self):
+ env = {"$type": "FeatureInstanceRef", "id": "abc"}
+ assert _isTypedRefEnvelope(env) is True
+
+ def test_recognisesConnectionRef(self):
+ env = {"$type": "ConnectionRef", "id": "abc"}
+ assert _isTypedRefEnvelope(env) is True
+
+ def test_rejectsRawDict(self):
+ assert _isTypedRefEnvelope({"id": "abc"}) is False
+
+ def test_rejectsUnknownType(self):
+ assert _isTypedRefEnvelope({"$type": "Foobar", "id": "abc"}) is False
+
+ def test_rejectsNonDict(self):
+ assert _isTypedRefEnvelope("abc") is False
+ assert _isTypedRefEnvelope(None) is False
+ assert _isTypedRefEnvelope(["abc"]) is False
+
+
+class TestUnwrapTypedRef:
+ def test_unwrapsFeatureInstanceRefToId(self):
+ env = {"$type": "FeatureInstanceRef", "id": "uuid-x", "featureCode": "trustee"}
+ assert _unwrapTypedRef(env) == "uuid-x"
+
+ def test_unwrapsConnectionRefToId(self):
+ env = {"$type": "ConnectionRef", "id": "conn-y", "authority": "msft"}
+ assert _unwrapTypedRef(env) == "conn-y"
+
+ def test_unwrapsSharePointFileRefToFilePath(self):
+ env = {"$type": "SharePointFileRef", "filePath": "/Sites/X/file.pdf"}
+ assert _unwrapTypedRef(env) == "/Sites/X/file.pdf"
+
+ def test_passthroughForNonEnvelope(self):
+ assert _unwrapTypedRef("plain-string") == "plain-string"
+ assert _unwrapTypedRef({"id": "abc"}) == {"id": "abc"}
+ assert _unwrapTypedRef(None) is None
+
+ def test_returnsEnvelopeIfPrimaryFieldMissing(self):
+ # Defensive: malformed envelope without ``id`` falls back to itself
+ # rather than silently dropping data.
+ env = {"$type": "FeatureInstanceRef", "featureCode": "trustee"}
+ assert _unwrapTypedRef(env) == env
+
+
+class TestResolveParameterReferencesUnwrap:
+ def test_typedEnvelopeAtTopLevelIsUnwrapped(self):
+ env = {"$type": "FeatureInstanceRef", "id": "uuid-z", "featureCode": "trustee"}
+ out = resolveParameterReferences(env, nodeOutputs={})
+ assert out == "uuid-z"
+
+ def test_typedEnvelopeNestedInDictIsUnwrapped(self):
+ params = {
+ "featureInstanceId": {
+ "$type": "FeatureInstanceRef",
+ "id": "uuid-z",
+ "featureCode": "trustee",
+ },
+ "mode": "lookup",
+ }
+ out = resolveParameterReferences(params, nodeOutputs={})
+ assert out == {"featureInstanceId": "uuid-z", "mode": "lookup"}
+
+ def test_typedEnvelopesInListAreUnwrappedElementwise(self):
+ params = [
+ {"$type": "FeatureInstanceRef", "id": "u1"},
+ {"$type": "FeatureInstanceRef", "id": "u2"},
+ "static",
+ ]
+ out = resolveParameterReferences(params, nodeOutputs={})
+ assert out == ["u1", "u2", "static"]
+
+ def test_typedEnvelopeIsResolvedBeforeRefLookup(self):
+ # If a workflow somehow contains both shapes, the typed envelope wins;
+ # ref-resolution is for upstream-bound DataRefs which never carry
+ # ``$type`` at the top level.
+ env = {
+ "$type": "FeatureInstanceRef",
+ "id": "uuid-z",
+ # nonsensical ``type: ref`` shadow — must be ignored.
+ "type": "ref",
+ "nodeId": "nope",
+ "path": ["whatever"],
+ }
+ out = resolveParameterReferences(env, nodeOutputs={"nope": {"whatever": "x"}})
+ assert out == "uuid-z"
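A round-trip check ties the two halves together: the persisted-graph migration wraps a raw id into a typed envelope, and the runtime resolver unwraps it back to the plain string executors expect. A minimal sketch reusing only the helpers already exercised by this test module; the graph shape is illustrative:

def test_migrateThenResolveRoundTrip():
    # Migration followed by runtime resolution must yield the original raw id.
    graph = {
        "nodes": [
            {
                "id": "n5",
                "type": "trustee.extractFromFiles",
                "parameters": {"featureInstanceId": "abc-123"},
            }
        ]
    }
    migrated = materializeFeatureInstanceRefs(graph)
    params = migrated["nodes"][0]["parameters"]
    resolved = resolveParameterReferences(params, nodeOutputs={})
    assert resolved["featureInstanceId"] == "abc-123"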
From 24f0c3e2ebec7dd27d40dfb732a40de13079c6d3 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Sun, 26 Apr 2026 08:31:35 +0200
Subject: [PATCH 3/7] clean up internal-marked exports
---
app.py | 14 +-
modules/connectors/connectorDbPostgre.py | 98 ++++---
modules/datamodels/datamodelBase.py | 12 +-
modules/datamodels/datamodelFiles.py | 6 +-
modules/datamodels/datamodelMembership.py | 22 +-
modules/datamodels/datamodelRbac.py | 12 +-
modules/datamodels/datamodelSubscription.py | 2 +-
modules/datamodels/datamodelUam.py | 6 +-
modules/datamodels/datamodelUdm.py | 6 +-
modules/datamodels/datamodelUtils.py | 2 +
modules/demoConfigs/__init__.py | 8 +-
modules/demoConfigs/investorDemo2026.py | 8 +-
modules/demoConfigs/pwgDemo2026.py | 8 +-
.../chatbot/interfaceFeatureChatbot.py | 12 +-
.../features/chatbot/routeFeatureChatbot.py | 27 +-
.../commcoach/routeFeatureCommcoach.py | 10 +-
.../features/commcoach/serviceCommcoach.py | 22 +-
.../commcoach/serviceCommcoachScheduler.py | 6 +-
.../datamodelFeatureGraphicalEditor.py | 13 +-
.../features/graphicalEditor/nodeAdapter.py | 4 +-
.../nodeDefinitions/redmine.py | 31 ++-
.../nodeDefinitions/trustee.py | 38 +--
.../features/graphicalEditor/nodeRegistry.py | 4 +-
modules/features/graphicalEditor/portTypes.py | 20 +-
.../routeFeatureGraphicalEditor.py | 77 +++++-
modules/features/redmine/serviceRedmine.py | 4 +-
.../features/redmine/serviceRedmineStats.py | 4 +-
.../redmine/serviceRedmineStatsCache.py | 2 +-
.../features/redmine/serviceRedmineSync.py | 8 +-
.../features/teamsbot/routeFeatureTeamsbot.py | 18 +-
modules/features/teamsbot/service.py | 44 +--
.../trustee/accounting/accountingBridge.py | 4 +-
.../accounting/accountingConnectorBase.py | 45 +++
.../trustee/accounting/accountingDataSync.py | 236 +++++++++++++---
.../trustee/accounting/accountingRegistry.py | 2 +-
.../connectors/accountingConnectorAbacus.py | 183 +++++++++++-
.../connectors/accountingConnectorBexio.py | 175 +++++++++++-
.../connectors/accountingConnectorRma.py | 254 +++++++++++++++++
.../trustee/interfaceFeatureTrustee.py | 31 ++-
modules/features/trustee/mainTrustee.py | 10 +-
.../features/trustee/routeFeatureTrustee.py | 194 +++++++------
.../workspace/routeFeatureWorkspace.py | 8 +-
modules/interfaces/interfaceBootstrap.py | 8 +-
modules/interfaces/interfaceDbApp.py | 16 +-
modules/interfaces/interfaceDbBilling.py | 84 +++---
modules/interfaces/interfaceDbChat.py | 8 +-
modules/interfaces/interfaceDbKnowledge.py | 8 +-
modules/interfaces/interfaceDbManagement.py | 13 +-
modules/interfaces/interfaceDbSubscription.py | 6 +-
modules/interfaces/interfaceRbac.py | 64 +++--
modules/routes/routeAdminDemoConfig.py | 12 +-
modules/routes/routeAdminFeatures.py | 8 +-
modules/routes/routeAudit.py | 70 +++--
modules/routes/routeBilling.py | 174 ++++++------
modules/routes/routeDataConnections.py | 61 +---
modules/routes/routeDataFiles.py | 36 ++-
modules/routes/routeDataMandates.py | 28 +-
modules/routes/routeDataPrompts.py | 31 ++-
modules/routes/routeDataUsers.py | 73 +++--
modules/routes/routeHelpers.py | 196 ++++++++++---
modules/routes/routeI18n.py | 14 +-
modules/routes/routeInvitations.py | 33 +--
modules/routes/routeNotifications.py | 6 +-
modules/routes/routeSecurityLocal.py | 14 +-
modules/routes/routeStore.py | 4 +-
modules/routes/routeSubscription.py | 69 +++--
modules/routes/routeSystem.py | 4 +-
modules/routes/routeVoiceUser.py | 4 +-
modules/routes/routeWorkflowDashboard.py | 260 ++++++++++--------
.../services/serviceAgent/agentLoop.py | 2 +-
.../services/serviceAgent/mainServiceAgent.py | 4 +-
.../services/serviceExtraction/subPipeline.py | 4 +-
.../services/serviceExtraction/subRegistry.py | 6 +-
.../serviceKnowledge/mainServiceKnowledge.py | 4 +-
.../mainServiceSubscription.py | 16 +-
modules/shared/aiAuditLogger.py | 4 +-
modules/shared/attributeUtils.py | 48 ++--
modules/shared/dbRegistry.py | 2 +-
modules/shared/debugLogger.py | 12 +-
modules/shared/fkRegistry.py | 20 +-
modules/shared/i18nRegistry.py | 20 +-
modules/shared/notifyMandateAdmins.py | 8 +-
modules/shared/timeUtils.py | 2 +-
modules/system/databaseHealth.py | 14 +-
modules/system/mainSystem.py | 2 +-
.../workflows/automation2/executionEngine.py | 12 +-
.../executors/actionNodeExecutor.py | 6 +-
.../automation2/executors/dataExecutor.py | 10 +-
.../automation2/executors/flowExecutor.py | 16 +-
modules/workflows/automation2/graphUtils.py | 8 +-
modules/workflows/scheduler/mainScheduler.py | 2 +-
tests/demo/conftest.py | 4 +-
tests/demo/test_demo_api.py | 16 +-
tests/demo/test_pwg_demo_bootstrap.py | 4 +-
tests/test_service_redmine_stats_cache.py | 6 +-
tests/unit/datamodels/test_udm_bridge.py | 6 +-
.../test_trustee_template_workflows.py | 59 ++++
...test_accountingConnectorAbacus_balances.py | 94 +++++++
.../test_accountingConnectorBexio_balances.py | 114 ++++++++
.../test_accountingConnectorRma_balances.py | 156 +++++++++++
.../test_accountingDataSync_balances.py | 196 +++++++++++++
...est_featureInstanceRef_node_definitions.py | 95 +++++++
.../unit/graphicalEditor/test_node_adapter.py | 6 +-
.../test_route_options_feature_instance.py | 66 +++++
104 files changed, 2983 insertions(+), 1055 deletions(-)
create mode 100644 tests/unit/features/test_trustee_template_workflows.py
create mode 100644 tests/unit/features/trustee/test_accountingConnectorAbacus_balances.py
create mode 100644 tests/unit/features/trustee/test_accountingConnectorBexio_balances.py
create mode 100644 tests/unit/features/trustee/test_accountingConnectorRma_balances.py
create mode 100644 tests/unit/features/trustee/test_accountingDataSync_balances.py
create mode 100644 tests/unit/graphicalEditor/test_featureInstanceRef_node_definitions.py
create mode 100644 tests/unit/graphicalEditor/test_route_options_feature_instance.py
diff --git a/app.py b/app.py
index 8e3552b5..d4d0ba99 100644
--- a/app.py
+++ b/app.py
@@ -327,9 +327,9 @@ async def lifespan(app: FastAPI):
# Sync gateway i18n registry to DB and load translation cache
try:
- from modules.shared.i18nRegistry import _syncRegistryToDb, _loadCache
- await _syncRegistryToDb()
- await _loadCache()
+ from modules.shared.i18nRegistry import syncRegistryToDb, loadCache
+ await syncRegistryToDb()
+ await loadCache()
logger.info("i18n registry sync + cache load completed")
except Exception as e:
logger.warning(f"i18n registry sync failed (non-critical): {e}")
@@ -522,15 +522,15 @@ from modules.auth import (
# Per-request context middleware: language (Accept-Language) + user timezone (X-User-Timezone).
# Both are written into ContextVars and consumed by t() / resolveText() and getRequestNow()
# without having to thread them through every call site.
-from modules.shared.i18nRegistry import _setLanguage, normalizePrimaryLanguageTag
-from modules.shared.timeUtils import _setRequestTimezone
+from modules.shared.i18nRegistry import setLanguage, normalizePrimaryLanguageTag
+from modules.shared.timeUtils import setRequestTimezone
@app.middleware("http")
async def _requestContextMiddleware(request: Request, call_next):
acceptLang = request.headers.get("Accept-Language", "")
lang = normalizePrimaryLanguageTag(acceptLang, "de")
- _setLanguage(lang)
- _setRequestTimezone(request.headers.get("X-User-Timezone", ""))
+ setLanguage(lang)
+ setRequestTimezone(request.headers.get("X-User-Timezone", ""))
return await call_next(request)
app.add_middleware(CSRFMiddleware)
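For readers unfamiliar with the ContextVar pattern this middleware relies on, here is a stripped-down sketch of how a setter like setLanguage and a consumer like t() can share per-request state without threading parameters through call sites. The bodies are illustrative, not the real i18nRegistry implementation:

import contextvars

# One ContextVar per request-scoped value; the middleware writes, t() reads.
_requestLanguage: contextvars.ContextVar[str] = contextvars.ContextVar(
    "requestLanguage", default="de"
)

def setLanguage(lang: str) -> None:
    _requestLanguage.set(lang)

def t(key: str, translations: dict) -> str:
    # Assumption: fall back to the key itself when no translation exists.
    lang = _requestLanguage.get()
    return translations.get(lang, {}).get(key, key)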
diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py
index 72098ff1..f2e7758e 100644
--- a/modules/connectors/connectorDbPostgre.py
+++ b/modules/connectors/connectorDbPostgre.py
@@ -76,7 +76,7 @@ def _isJsonbType(fieldType) -> bool:
return False
-def _get_model_fields(model_class) -> Dict[str, str]:
+def getModelFields(model_class) -> Dict[str, str]:
"""Get all fields from Pydantic model and map to SQL types.
Supports explicit db_type override via json_schema_extra={"db_type": "vector(1536)"}.
@@ -122,21 +122,27 @@ def _get_model_fields(model_class) -> Dict[str, str]:
def _get_fk_sort_meta(model_class) -> Dict[str, Dict[str, str]]:
- """Map FK field name -> {model, labelField} from json_schema_extra (fk_model + frontend_fk_display_field)."""
+ """Map FK field name -> {model, labelField} from json_schema_extra (``fk_model`` + ``fk_label_field``).
+
+ ``fk_model`` may be omitted if ``fk_target.table`` is set (table name = resolver / JOIN key).
+ """
result: Dict[str, Dict[str, str]] = {}
for name, field_info in model_class.model_fields.items():
extra = field_info.json_schema_extra
if not extra or not isinstance(extra, dict):
continue
fk_model = extra.get("fk_model")
- label_field = extra.get("frontend_fk_display_field")
+ tgt = extra.get("fk_target")
+ if not fk_model and isinstance(tgt, dict) and tgt.get("table"):
+ fk_model = tgt["table"]
+ label_field = extra.get("fk_label_field")
if fk_model and label_field:
result[name] = {"model": str(fk_model), "labelField": str(label_field)}
return result
-def _parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: str = "") -> None:
+def parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: str = "") -> None:
"""Parse record fields in-place: numeric typing, vector parsing, JSONB deserialization."""
import json as _json
@@ -189,7 +195,7 @@ _current_user_id: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar
)
-def _get_cached_connector(
+def getCachedConnector(
dbHost: str,
dbDatabase: str,
dbUser: str = None,
@@ -553,7 +559,7 @@ class DatabaseConnector:
}
# Desired columns based on model
- model_fields = _get_model_fields(model_class)
+ model_fields = getModelFields(model_class)
desired_columns = set(["id"]) | set(model_fields.keys())
# Add missing columns
@@ -633,7 +639,7 @@ class DatabaseConnector:
def _create_table_from_model(self, cursor, table: str, model_class: type) -> None:
"""Create table with columns matching Pydantic model fields."""
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
# Enable pgvector if any field uses vector type
if any(_isVectorType(sqlType) for sqlType in fields.values()):
@@ -666,7 +672,7 @@ class DatabaseConnector:
) -> None:
"""Save record to normalized table with explicit columns."""
# Get columns from Pydantic model instead of database schema
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
columns = ["id"] + [field for field in fields.keys() if field != "id"]
if not columns:
@@ -751,9 +757,9 @@ class DatabaseConnector:
# Convert row to dict and handle JSONB fields
record = dict(row)
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
- _parseRecordFields(record, fields, f"record {recordId}")
+ parseRecordFields(record, fields, f"record {recordId}")
return record
except Exception as e:
@@ -822,10 +828,10 @@ class DatabaseConnector:
cursor.execute(f'SELECT * FROM "{table}" ORDER BY "id"')
records = [dict(row) for row in cursor.fetchall()]
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
modelFields = model_class.model_fields
for record in records:
- _parseRecordFields(record, fields, f"table {table}")
+ parseRecordFields(record, fields, f"table {table}")
# Set type-aware defaults for NULL JSONB fields
for fieldName, fieldType in fields.items():
if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
@@ -1011,10 +1017,10 @@ class DatabaseConnector:
cursor.execute(query, where_values)
records = [dict(row) for row in cursor.fetchall()]
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
modelFields = model_class.model_fields
for record in records:
- _parseRecordFields(record, fields, f"table {table}")
+ parseRecordFields(record, fields, f"table {table}")
for fieldName, fieldType in fields.items():
if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
fieldInfo = modelFields.get(fieldName)
@@ -1055,7 +1061,7 @@ class DatabaseConnector:
Translate PaginationParams + recordFilter into SQL clauses.
Returns (where_clause, order_clause, limit_clause, values, count_values).
"""
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
validColumns = set(fields.keys())
where_parts: List[str] = []
@@ -1214,10 +1220,10 @@ class DatabaseConnector:
cursor.execute(dataSql, values)
records = [dict(row) for row in cursor.fetchall()]
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
modelFields = model_class.model_fields
for record in records:
- _parseRecordFields(record, fields, f"table {table}")
+ parseRecordFields(record, fields, f"table {table}")
for fieldName, fieldType in fields.items():
if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
fieldInfo = modelFields.get(fieldName)
@@ -1235,10 +1241,13 @@ class DatabaseConnector:
if fieldFilter and isinstance(fieldFilter, list):
records = [{f: r[f] for f in fieldFilter if f in r} for r in records]
- pageSize = pagination.pageSize if pagination else max(totalItems, 1)
- totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
+ enrichRowsWithFkLabels(records, model_class)
- return {"items": records, "totalItems": totalItems, "totalPages": totalPages}
+ pageSize = pagination.pageSize if pagination else max(totalItems, 1)
+ totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
+
+ return {"items": records, "totalItems": totalItems, "totalPages": totalPages}
except Exception as e:
logger.error(f"Error in getRecordsetPaginated for table {table}: {e}")
return {"items": [], "totalItems": 0, "totalPages": 0}
@@ -1249,13 +1258,18 @@ class DatabaseConnector:
column: str,
pagination=None,
recordFilter: Dict[str, Any] = None,
- ) -> List[str]:
- """
- Returns sorted distinct non-null values for a column using SQL DISTINCT.
+ includeEmpty: bool = True,
+ ) -> List[Optional[str]]:
+ """Return sorted distinct values for a column using SQL DISTINCT.
+
+ When ``includeEmpty`` is True (default), NULL and empty-string rows are
+ represented as a single ``None`` entry at the end of the list — this
+ allows the frontend to offer a "(Leer)" filter option.
+
Applies cross-filtering (all filters except the requested column).
"""
table = model_class.__name__
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
if column not in fields:
return []
@@ -1274,18 +1288,28 @@ class DatabaseConnector:
where_clause, _, _, values, _ = \
self._buildPaginationClauses(model_class, pagination, recordFilter)
- sql = (
- f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} '
- f'WHERE "{column}" IS NOT NULL AND "{column}"::TEXT != \'\' '
- if not where_clause else
- f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} '
- f'AND "{column}" IS NOT NULL AND "{column}"::TEXT != \'\' '
- )
- sql += 'ORDER BY val'
+ nonNullCond = f'"{column}" IS NOT NULL AND "{column}"::TEXT != \'\''
+ if where_clause:
+ sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} AND {nonNullCond} ORDER BY val'
+ else:
+ sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}" WHERE {nonNullCond} ORDER BY val'
with self.connection.cursor() as cursor:
cursor.execute(sql, values)
- return [row["val"] for row in cursor.fetchall()]
+ result: List[Optional[str]] = [row["val"] for row in cursor.fetchall()]
+
+ if includeEmpty:
+ emptyCond = f'"{column}" IS NULL OR "{column}"::TEXT = \'\''
+ if where_clause:
+ emptySql = f'SELECT 1 FROM "{table}"{where_clause} AND ({emptyCond}) LIMIT 1'
+ else:
+ emptySql = f'SELECT 1 FROM "{table}" WHERE ({emptyCond}) LIMIT 1'
+ with self.connection.cursor() as cursor:
+ cursor.execute(emptySql, values)
+ if cursor.fetchone():
+ result.append(None)
+
+ return result
except Exception as e:
logger.error(f"Error in getDistinctColumnValues for {table}.{column}: {e}")
return []
@@ -1419,7 +1443,7 @@ class DatabaseConnector:
if not self._ensureTableExists(model_class):
raise ValueError(f"Table {table} does not exist")
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
columns = ["id"] + [f for f in fields.keys() if f != "id"]
modelFields = model_class.model_fields
@@ -1541,7 +1565,7 @@ class DatabaseConnector:
if not self._ensureTableExists(model_class):
return 0
- fields = _get_model_fields(model_class)
+ fields = getModelFields(model_class)
clauses: List[str] = []
params: List[Any] = []
for key, val in recordFilter.items():
@@ -1659,9 +1683,9 @@ class DatabaseConnector:
cursor.execute(query, params)
records = [dict(row) for row in cursor.fetchall()]
- fields = _get_model_fields(modelClass)
+ fields = getModelFields(modelClass)
for record in records:
- _parseRecordFields(record, fields, f"semanticSearch {table}")
+ parseRecordFields(record, fields, f"semanticSearch {table}")
return records
except Exception as e:
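The new includeEmpty contract is easiest to see in a concrete call. A sketch assuming a connector instance and a Mandate table whose status column holds 'active', 'archived', plus some NULL/empty rows; the trailing None is what the frontend renders as its "(Leer)" option:

# Hypothetical data: status IN ('active', 'archived', NULL, '')
values = connector.getDistinctColumnValues(Mandate, "status")
assert values == ["active", "archived", None]  # None appended once, at the end

# Opting out restores the previous NULL/empty-free behaviour.
values = connector.getDistinctColumnValues(Mandate, "status", includeEmpty=False)
assert values == ["active", "archived"]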
diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py
index 353f780b..2a65bcdc 100644
--- a/modules/datamodels/datamodelBase.py
+++ b/modules/datamodels/datamodelBase.py
@@ -8,12 +8,12 @@ from pydantic import BaseModel, Field
from modules.shared.i18nRegistry import i18nModel
-_MODEL_REGISTRY: Dict[str, Type["PowerOnModel"]] = {}
+MODEL_REGISTRY: Dict[str, Type["PowerOnModel"]] = {}
def _getModelByTableName(tableName: str) -> Optional[Type["PowerOnModel"]]:
"""Look up a PowerOnModel subclass by its table name (= class name)."""
- return _MODEL_REGISTRY.get(tableName)
+ return MODEL_REGISTRY.get(tableName)
@i18nModel("Basisdatensatz")
@@ -22,7 +22,7 @@ class PowerOnModel(BaseModel):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
- _MODEL_REGISTRY[cls.__name__] = cls
+ MODEL_REGISTRY[cls.__name__] = cls
sysCreatedAt: Optional[float] = Field(
default=None,
@@ -46,6 +46,9 @@ class PowerOnModel(BaseModel):
"frontend_required": False,
"frontend_visible": False,
"system": True,
+ "fk_model": "User",
+ "fk_label_field": "username",
+ "fk_target": {"db": "poweron_app", "table": "User"},
},
)
sysModifiedAt: Optional[float] = Field(
@@ -70,5 +73,8 @@ class PowerOnModel(BaseModel):
"frontend_required": False,
"frontend_visible": False,
"system": True,
+ "fk_model": "User",
+ "fk_label_field": "username",
+ "fk_target": {"db": "poweron_app", "table": "User"},
},
)
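The rename leaves the self-registration mechanism intact: every subclass lands in MODEL_REGISTRY at class-creation time via __init_subclass__, so table-name lookups need no manual wiring. A stripped-down sketch of the pattern (the real base class additionally carries the sys* audit fields shown above):

from typing import Dict, Type
from pydantic import BaseModel

MODEL_REGISTRY: Dict[str, Type["DemoModel"]] = {}

class DemoModel(BaseModel):
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        MODEL_REGISTRY[cls.__name__] = cls  # table name == class name

class Invoice(DemoModel):
    amount: float

assert MODEL_REGISTRY["Invoice"] is Invoice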
diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py
index c8b0c865..d9b78ddf 100644
--- a/modules/datamodels/datamodelFiles.py
+++ b/modules/datamodels/datamodelFiles.py
@@ -30,9 +30,8 @@ class FileItem(PowerOnModel):
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
- "frontend_fk_source": "/api/mandates/",
- "frontend_fk_display_field": "label",
"fk_model": "Mandate",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
)
@@ -44,9 +43,8 @@ class FileItem(PowerOnModel):
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
- "frontend_fk_source": "/api/features/instances",
- "frontend_fk_display_field": "label",
"fk_model": "FeatureInstance",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
},
)
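With the metadata above, _get_fk_sort_meta can resolve FileItem's FK labels without the removed frontend_fk_* keys. A sketch; the Mandate-referencing field name (mandateId) is assumed, since this hunk does not show it:

# Assumed field name: mandateId (not visible in this hunk).
meta = _get_fk_sort_meta(FileItem)
assert meta["mandateId"] == {"model": "Mandate", "labelField": "label"}

# Per the fallback documented earlier, omitting fk_model but keeping
# fk_target={"db": "poweron_app", "table": "Mandate"} yields the same entry:
# the table name stands in for the model name.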
diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py
index f70fe035..5c7280d0 100644
--- a/modules/datamodels/datamodelMembership.py
+++ b/modules/datamodels/datamodelMembership.py
@@ -31,9 +31,8 @@ class UserMandate(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/users/",
- "frontend_fk_display_field": "username",
"fk_model": "User",
+ "fk_label_field": "username",
"fk_target": {"db": "poweron_app", "table": "User"},
},
)
@@ -44,9 +43,8 @@ class UserMandate(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/mandates/",
- "frontend_fk_display_field": "label",
"fk_model": "Mandate",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
)
@@ -75,8 +73,8 @@ class FeatureAccess(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/users/",
- "frontend_fk_display_field": "username",
+ "fk_model": "User",
+ "fk_label_field": "username",
"fk_target": {"db": "poweron_app", "table": "User"},
},
)
@@ -87,8 +85,8 @@ class FeatureAccess(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/features/instances",
- "frontend_fk_display_field": "label",
+ "fk_model": "FeatureInstance",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
},
)
@@ -127,8 +125,8 @@ class UserMandateRole(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/rbac/roles",
- "frontend_fk_display_field": "roleLabel",
+ "fk_model": "Role",
+ "fk_label_field": "roleLabel",
"fk_target": {"db": "poweron_app", "table": "Role"},
},
)
@@ -162,8 +160,8 @@ class FeatureAccessRole(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
- "frontend_fk_source": "/api/rbac/roles",
- "frontend_fk_display_field": "roleLabel",
+ "fk_model": "Role",
+ "fk_label_field": "roleLabel",
"fk_target": {"db": "poweron_app", "table": "Role"},
},
)
diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py
index 1214a96f..45aa76a7 100644
--- a/modules/datamodels/datamodelRbac.py
+++ b/modules/datamodels/datamodelRbac.py
@@ -63,8 +63,8 @@ class Role(PowerOnModel):
"frontend_readonly": True,
"frontend_visible": True,
"frontend_required": False,
- "frontend_fk_source": "/api/mandates/",
- "frontend_fk_display_field": "label",
+ "fk_model": "Mandate",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
)
@@ -77,8 +77,8 @@ class Role(PowerOnModel):
"frontend_readonly": True,
"frontend_visible": True,
"frontend_required": False,
- "frontend_fk_source": "/api/features/instances",
- "frontend_fk_display_field": "label",
+ "fk_model": "FeatureInstance",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
},
)
@@ -115,8 +115,8 @@ class AccessRule(PowerOnModel):
"frontend_type": "select",
"frontend_readonly": True,
"frontend_required": True,
- "frontend_fk_source": "/api/rbac/roles",
- "frontend_fk_display_field": "roleLabel",
+ "fk_model": "Role",
+ "fk_label_field": "roleLabel",
"fk_target": {"db": "poweron_app", "table": "Role"},
},
)
diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py
index 058f2e17..46ce1f31 100644
--- a/modules/datamodels/datamodelSubscription.py
+++ b/modules/datamodels/datamodelSubscription.py
@@ -407,7 +407,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = {
}
-def _getPlan(planKey: str) -> Optional[SubscriptionPlan]:
+def getPlan(planKey: str) -> Optional[SubscriptionPlan]:
"""Resolve a plan by key from the built-in catalog."""
return BUILTIN_PLANS.get(planKey)
diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py
index 90dd9452..5cfb4c37 100644
--- a/modules/datamodels/datamodelUam.py
+++ b/modules/datamodels/datamodelUam.py
@@ -397,6 +397,8 @@ class UserConnection(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Benutzer-ID",
+ "fk_model": "User",
+ "fk_label_field": "username",
"fk_target": {"db": "poweron_app", "table": "User"},
},
)
@@ -650,7 +652,7 @@ class UserInDB(User):
)
-def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]:
+def normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]:
"""
Coerce ttsVoiceMap payloads to Dict[str, str].
@@ -728,6 +730,6 @@ class UserVoicePreferences(PowerOnModel):
@field_validator("ttsVoiceMap", mode="before")
@classmethod
def _validateTtsVoiceMap(cls, value: Any) -> Optional[Dict[str, str]]:
- return _normalizeTtsVoiceMap(value)
+ return normalizeTtsVoiceMap(value)
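The promoted normalizeTtsVoiceMap sits behind a mode="before" validator. A minimal sketch of that pattern with an illustrative coercion body (the real normalization rules are not shown in this hunk):

from typing import Any, Dict, Optional
from pydantic import BaseModel, field_validator

def normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]:
    # Illustrative only: accept dicts, stringify keys and values, drop the rest.
    if isinstance(value, dict):
        return {str(k): str(v) for k, v in value.items()}
    return None

class VoicePrefs(BaseModel):
    ttsVoiceMap: Optional[Dict[str, str]] = None

    @field_validator("ttsVoiceMap", mode="before")
    @classmethod
    def _validateTtsVoiceMap(cls, value: Any) -> Optional[Dict[str, str]]:
        return normalizeTtsVoiceMap(value)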
diff --git a/modules/datamodels/datamodelUdm.py b/modules/datamodels/datamodelUdm.py
index 330467b4..794b71f0 100644
--- a/modules/datamodels/datamodelUdm.py
+++ b/modules/datamodels/datamodelUdm.py
@@ -177,7 +177,7 @@ def _groupKeyForPart(part: ContentPart) -> Tuple[str, int, str]:
_VALID_DOC_SOURCES = frozenset({"pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"})
-def _contentPartsToUdm(extracted: ContentExtracted, sourceType: str, sourcePath: str) -> UdmDocument:
+def contentPartsToUdm(extracted: ContentExtracted, sourceType: str, sourcePath: str) -> UdmDocument:
"""Convert flat ContentPart list into a UdmDocument using structural heuristics."""
parts = list(extracted.parts or [])
st: Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"] = (
@@ -290,7 +290,7 @@ def _stripUdmForReferences(udm: UdmDocument) -> UdmDocument:
return clone
-def _applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument:
+def applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument:
if detail == "structure":
return _stripUdmRaw(udm)
if detail == "references":
@@ -298,7 +298,7 @@ def _applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument:
return udm
-def _mimeToUdmSourceType(mimeType: str, fileName: str) -> Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"]:
+def mimeToUdmSourceType(mimeType: str, fileName: str) -> Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"]:
m = (mimeType or "").lower()
fn = (fileName or "").lower()
if m == "application/pdf" or fn.endswith(".pdf"):
diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py
index f389d0d7..0c1bb8c6 100644
--- a/modules/datamodels/datamodelUtils.py
+++ b/modules/datamodels/datamodelUtils.py
@@ -27,6 +27,8 @@ class Prompt(PowerOnModel):
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
+ "fk_model": "Mandate",
+ "fk_label_field": "label",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
)
diff --git a/modules/demoConfigs/__init__.py b/modules/demoConfigs/__init__.py
index 9e64cf96..5395f71b 100644
--- a/modules/demoConfigs/__init__.py
+++ b/modules/demoConfigs/__init__.py
@@ -2,7 +2,7 @@
Demo Configs — Auto-Discovery Module
Scans this folder for Python files that contain subclasses of _BaseDemoConfig
-and exposes them via _getAvailableDemoConfigs().
+and exposes them via getAvailableDemoConfigs().
"""
import importlib
@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
_configCache: Dict[str, _BaseDemoConfig] = {}
-def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]:
+def getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]:
"""Return a dict of code -> instance for every discovered demo config."""
if _configCache:
return _configCache
@@ -43,7 +43,7 @@ def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]:
return _configCache
-def _getDemoConfigByCode(code: str) -> _BaseDemoConfig | None:
+def getDemoConfigByCode(code: str) -> _BaseDemoConfig | None:
"""Get a specific demo config by its code."""
- configs = _getAvailableDemoConfigs()
+ configs = getAvailableDemoConfigs()
return configs.get(code)
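The discovery behind getAvailableDemoConfigs can be pictured as a pkgutil scan plus an issubclass filter. A sketch under the assumption that each config module exposes its class at module level; the real implementation additionally caches into _configCache and logs failures:

import importlib
import inspect
import pkgutil

def discoverConfigs(package) -> dict:
    # Map code -> instance for every _BaseDemoConfig subclass in the package.
    configs = {}
    for modInfo in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package.__name__}.{modInfo.name}")
        for _, cls in inspect.getmembers(module, inspect.isclass):
            if issubclass(cls, _BaseDemoConfig) and cls is not _BaseDemoConfig:
                instance = cls()
                configs[instance.code] = instance
    return configs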
diff --git a/modules/demoConfigs/investorDemo2026.py b/modules/demoConfigs/investorDemo2026.py
index 81956c6d..f8fc678f 100644
--- a/modules/demoConfigs/investorDemo2026.py
+++ b/modules/demoConfigs/investorDemo2026.py
@@ -447,10 +447,10 @@ class InvestorDemo2026(_BaseDemoConfig):
if not mandateId:
return
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface
from modules.datamodels.datamodelBilling import BillingSettings
- billingInterface = _getRootInterface()
+ billingInterface = getRootInterface()
existingSettings = billingInterface.getSettings(mandateId)
if existingSettings:
summary["skipped"].append(f"Billing for {mandateLabel} exists")
@@ -532,8 +532,8 @@ class InvestorDemo2026(_BaseDemoConfig):
summary["removed"].append(f"{len(roles)} roles in {mandateLabel}")
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface
- billingDb = _getRootInterface().db
+ from modules.interfaces.interfaceDbBilling import getRootInterface
+ billingDb = getRootInterface().db
billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) or []
for bs in billingSettings:
billingDb.recordDelete(BillingSettings, bs.get("id"))
diff --git a/modules/demoConfigs/pwgDemo2026.py b/modules/demoConfigs/pwgDemo2026.py
index d4661bcf..f80760f9 100644
--- a/modules/demoConfigs/pwgDemo2026.py
+++ b/modules/demoConfigs/pwgDemo2026.py
@@ -377,9 +377,9 @@ class PwgDemo2026(_BaseDemoConfig):
return
try:
from modules.datamodels.datamodelBilling import BillingSettings
- from modules.interfaces.interfaceDbBilling import _getRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface
- billingInterface = _getRootInterface()
+ billingInterface = getRootInterface()
existingSettings = billingInterface.getSettings(mandateId)
if existingSettings:
summary["skipped"].append(f"Billing for {mandateLabel} exists")
@@ -708,8 +708,8 @@ class PwgDemo2026(_BaseDemoConfig):
db.recordDelete(Role, role.get("id"))
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface
- billingDb = _getRootInterface().db
+ from modules.interfaces.interfaceDbBilling import getRootInterface
+ billingDb = getRootInterface().db
billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) or []
for bs in billingSettings:
billingDb.recordDelete(BillingSettings, bs.get("id"))
diff --git a/modules/features/chatbot/interfaceFeatureChatbot.py b/modules/features/chatbot/interfaceFeatureChatbot.py
index 28f6000c..68d672a4 100644
--- a/modules/features/chatbot/interfaceFeatureChatbot.py
+++ b/modules/features/chatbot/interfaceFeatureChatbot.py
@@ -139,13 +139,13 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI
try:
import os
from datetime import datetime, UTC
- from modules.shared.debugLogger import _getBaseDebugDir, _ensureDir
+ from modules.shared.debugLogger import getBaseDebugDir, ensureDir
from modules.interfaces.interfaceDbManagement import getInterface
# Create base debug directory (use base debug dir, not prompts subdirectory)
- baseDebugDir = _getBaseDebugDir()
+ baseDebugDir = getBaseDebugDir()
debug_root = os.path.join(baseDebugDir, 'messages')
- _ensureDir(debug_root)
+ ensureDir(debug_root)
# Generate timestamp
timestamp = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
@@ -210,7 +210,7 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI
safe_label = "default"
label_folder = os.path.join(message_path, safe_label)
- _ensureDir(label_folder)
+ ensureDir(label_folder)
# Store each document
for i, doc in enumerate(docs):
@@ -401,8 +401,8 @@ class ChatObjects:
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
- from modules.connectors.connectorDbPostgre import _get_cached_connector
- self.db = _get_cached_connector(
+ from modules.connectors.connectorDbPostgre import getCachedConnector
+ self.db = getCachedConnector(
dbHost=dbHost,
dbDatabase=dbDatabase,
dbUser=dbUser,
diff --git a/modules/features/chatbot/routeFeatureChatbot.py b/modules/features/chatbot/routeFeatureChatbot.py
index 06cf985d..4ee82fc5 100644
--- a/modules/features/chatbot/routeFeatureChatbot.py
+++ b/modules/features/chatbot/routeFeatureChatbot.py
@@ -204,19 +204,20 @@ def get_chatbot_threads(
normalized_wf["maxSteps"] = 10
normalized_workflows.append(normalized_wf)
- metadata = PaginationMetadata(
- currentPage=paginationParams.page if paginationParams else 1,
- pageSize=paginationParams.pageSize if paginationParams else len(workflows),
- totalItems=totalItems,
- totalPages=totalPages,
- sort=paginationParams.sort if paginationParams else [],
- filters=paginationParams.filters if paginationParams else None
- )
-
- return PaginatedResponse(
- items=normalized_workflows,
- pagination=metadata
- )
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
+ enriched = enrichRowsWithFkLabels(normalized_workflows, ChatbotConversation)
+
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
+ currentPage=paginationParams.page if paginationParams else 1,
+ pageSize=paginationParams.pageSize if paginationParams else len(workflows),
+ totalItems=totalItems,
+ totalPages=totalPages,
+ sort=paginationParams.sort if paginationParams else [],
+ filters=paginationParams.filters if paginationParams else None
+ ).model_dump(),
+ }
except HTTPException:
raise
diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py
index 99ae798e..bb83c13c 100644
--- a/modules/features/commcoach/routeFeatureCommcoach.py
+++ b/modules/features/commcoach/routeFeatureCommcoach.py
@@ -336,10 +336,10 @@ async def startSession(
try:
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
voiceInterface = getVoiceInterface(context.user, mandateId)
- from .serviceCommcoach import _getUserVoicePrefs, _stripMarkdownForTts, _buildTtsConfigErrorMessage
- language, voiceName = _getUserVoicePrefs(userId, mandateId)
+ from .serviceCommcoach import getUserVoicePrefs, stripMarkdownForTts, buildTtsConfigErrorMessage
+ language, voiceName = getUserVoicePrefs(userId, mandateId)
ttsResult = await voiceInterface.textToSpeech(
- text=_stripMarkdownForTts(greetingText),
+ text=stripMarkdownForTts(greetingText),
languageCode=language,
voiceName=voiceName,
)
@@ -584,8 +584,8 @@ async def sendAudioStream(
if not audioBody:
raise HTTPException(status_code=400, detail=routeApiMsg("No audio data received"))
- from .serviceCommcoach import _getUserVoicePrefs
- language, _ = _getUserVoicePrefs(str(context.user.id), mandateId)
+ from .serviceCommcoach import getUserVoicePrefs
+ language, _ = getUserVoicePrefs(str(context.user.id), mandateId)
contextId = session.get("contextId")
service = CommcoachService(context.user, mandateId, instanceId)
diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py
index 332a4a01..8765e30c 100644
--- a/modules/features/commcoach/serviceCommcoach.py
+++ b/modules/features/commcoach/serviceCommcoach.py
@@ -79,7 +79,7 @@ def _selectConfiguredVoice(
return None
-def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str:
+def buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str:
if voiceName:
return (
f'Die konfigurierte Stimme "{voiceName}" für {language} ist ungültig oder nicht verfügbar. '
@@ -91,7 +91,7 @@ def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawErro
)
-def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple:
+def getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple:
"""Load voice language and voiceName from central UserVoicePreferences.
Returns (language, voiceName) tuple."""
try:
@@ -160,7 +160,7 @@ def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple:
return ("de-DE", None)
-def _stripMarkdownForTts(text: str) -> str:
+def stripMarkdownForTts(text: str) -> str:
"""Strip markdown formatting so TTS reads clean speech text."""
t = text
t = re.sub(r'\*\*(.+?)\*\*', r'\1', t)
@@ -346,9 +346,9 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
import base64
voiceInterface = getVoiceInterface(currentUser, mandateId)
- language, voiceName = _getUserVoicePrefs(str(currentUser.id), mandateId)
+ language, voiceName = getUserVoicePrefs(str(currentUser.id), mandateId)
ttsResult = await voiceInterface.textToSpeech(
- text=_stripMarkdownForTts(speechText),
+ text=stripMarkdownForTts(speechText),
languageCode=language,
voiceName=voiceName,
)
@@ -362,7 +362,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand
return
errorDetail = ttsResult.get("error", "Text-to-Speech failed")
await emitSessionEvent(sessionId, "error", {
- "message": _buildTtsConfigErrorMessage(language, voiceName, errorDetail),
+ "message": buildTtsConfigErrorMessage(language, voiceName, errorDetail),
"detail": errorDetail,
"ttsLanguage": language,
"ttsVoice": voiceName,
@@ -370,7 +370,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand
except Exception as e:
logger.warning(f"TTS failed for session {sessionId}: {e}")
await emitSessionEvent(sessionId, "error", {
- "message": _buildTtsConfigErrorMessage("de-DE", None, str(e)),
+ "message": buildTtsConfigErrorMessage("de-DE", None, str(e)),
"detail": str(e),
})
@@ -695,7 +695,7 @@ _TTS_WORD_LIMIT = 200
async def _prepareSpeechText(fullText: str, callAiFn) -> str:
"""Prepare text for TTS. Short responses used directly; long ones get summarized."""
- cleaned = _stripMarkdownForTts(fullText)
+ cleaned = stripMarkdownForTts(fullText)
wordCount = len(cleaned.split())
if wordCount <= _TTS_WORD_LIMIT:
return cleaned
@@ -1373,7 +1373,7 @@ class CommcoachService:
from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface
from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName
+ from modules.shared.notifyMandateAdmins import renderHtmlEmail, resolveMandateName
rootInterface = getRootInterface()
user = rootInterface.getUser(self.userId)
@@ -1382,9 +1382,9 @@ class CommcoachService:
messaging = getMessagingInterface()
subject = f"Coaching-Session Zusammenfassung: {contextTitle}"
- mandateName = _resolveMandateName(self.mandateId)
+ mandateName = resolveMandateName(self.mandateId)
contentHtml = _buildSummaryEmailBlock(emailData, summary, contextTitle)
- htmlMessage = _renderHtmlEmail(
+ htmlMessage = renderHtmlEmail(
"Coaching-Session Zusammenfassung",
[
f'Thema: {contextTitle}',
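A quick illustration of the promoted helper. Only the bold-stripping rule is visible in this hunk, so the expectation covers just that case and assumes the later rules leave markdown-free text untouched:

text = "**Wichtig:** bitte die Zusammenfassung prüfen."
# Assumption: rules after the bold regex do not alter plain text.
assert stripMarkdownForTts(text) == "Wichtig: bitte die Zusammenfassung prüfen."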
diff --git a/modules/features/commcoach/serviceCommcoachScheduler.py b/modules/features/commcoach/serviceCommcoachScheduler.py
index dcbc1e86..00bc3b1e 100644
--- a/modules/features/commcoach/serviceCommcoachScheduler.py
+++ b/modules/features/commcoach/serviceCommcoachScheduler.py
@@ -64,7 +64,7 @@ async def _runDailyReminders():
from modules.connectors.connectorDbPostgre import DatabaseConnector
from .datamodelCommcoach import CoachingUserProfile, CoachingContextStatus
from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface
- from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName
+ from modules.shared.notifyMandateAdmins import renderHtmlEmail, resolveMandateName
dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
db = DatabaseConnector(
@@ -106,8 +106,8 @@ async def _runDailyReminders():
contextList = ", ".join(contextTitles)
subject = "Dein tägliches Coaching wartet"
- mandateName = _resolveMandateName(profile.get("mandateId"))
- htmlMessage = _renderHtmlEmail(
+ mandateName = resolveMandateName(profile.get("mandateId"))
+ htmlMessage = renderHtmlEmail(
"Zeit für dein tägliches Coaching",
[
f"Du hast aktuell {len(contexts)} aktive Coaching-Themen.",
diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py
index b86c295a..63572649 100644
--- a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py
+++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py
@@ -68,8 +68,7 @@ class AutoWorkflow(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Mandanten-ID",
- "frontend_fk_source": "/api/mandates/",
- "frontend_fk_display_field": "label",
+ "fk_label_field": "label",
"fk_model": "Mandate",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
@@ -81,8 +80,7 @@ class AutoWorkflow(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Feature-Instanz-ID",
- "frontend_fk_source": "/api/features/instances",
- "frontend_fk_display_field": "label",
+ "fk_label_field": "label",
"fk_model": "FeatureInstance",
"fk_target": {"db": "poweron_app", "table": "FeatureInstance"},
},
@@ -220,6 +218,8 @@ class AutoVersion(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Veröffentlicht von",
+ "fk_model": "User",
+ "fk_label_field": "username",
"fk_target": {"db": "poweron_app", "table": "User"},
},
)
@@ -259,8 +259,7 @@ class AutoRun(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Mandanten-ID",
- "frontend_fk_source": "/api/mandates/",
- "frontend_fk_display_field": "label",
+ "fk_label_field": "label",
"fk_model": "Mandate",
"fk_target": {"db": "poweron_app", "table": "Mandate"},
},
@@ -273,6 +272,8 @@ class AutoRun(PowerOnModel):
"frontend_readonly": True,
"frontend_required": False,
"label": "Auslöser",
+ "fk_model": "User",
+ "fk_label_field": "username",
"fk_target": {"db": "poweron_app", "table": "User"},
},
)
diff --git a/modules/features/graphicalEditor/nodeAdapter.py b/modules/features/graphicalEditor/nodeAdapter.py
index ed7ec711..f0cd1469 100644
--- a/modules/features/graphicalEditor/nodeAdapter.py
+++ b/modules/features/graphicalEditor/nodeAdapter.py
@@ -73,7 +73,7 @@ def _isMethodBoundNode(node: Mapping[str, Any]) -> bool:
return bool(node.get("_method") and node.get("_action"))
-def _bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]:
+def bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]:
"""Build the canonical 'method.action' identifier from a legacy node dict.
Returns None for framework-primitive nodes (trigger/flow/input/data).
@@ -121,7 +121,7 @@ def _adapterFromLegacyNode(node: Mapping[str, Any]) -> Optional[NodeAdapter]:
if not _isMethodBoundNode(node):
return None
- bindsAction = _bindsActionFromLegacy(node)
+ bindsAction = bindsActionFromLegacy(node)
if not bindsAction:
return None
diff --git a/modules/features/graphicalEditor/nodeDefinitions/redmine.py b/modules/features/graphicalEditor/nodeDefinitions/redmine.py
index 55a6e7c7..d9ea8bab 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/redmine.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/redmine.py
@@ -4,6 +4,19 @@
from modules.shared.i18nRegistry import t
+# Typed FeatureInstance binding (replaces legacy `string, hidden`).
+# - the type FeatureInstanceRef[redmine] lets the DataPicker filter compatible upstream paths.
+# - frontendType "featureInstance" is rendered by FeatureInstancePicker which
+# loads /options/feature.instance?featureCode=redmine for the current mandate.
+_REDMINE_INSTANCE_PARAM = {
+ "name": "featureInstanceId",
+ "type": "FeatureInstanceRef[redmine]",
+ "required": True,
+ "frontendType": "featureInstance",
+ "frontendOptions": {"featureCode": "redmine"},
+ "description": t("Redmine-Mandant"),
+}
+
REDMINE_NODES = [
{
"id": "redmine.readTicket",
@@ -11,8 +24,7 @@ REDMINE_NODES = [
"label": t("Ticket lesen"),
"description": t("Einzelnes Redmine-Ticket aus dem Mirror laden."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "ticketId", "type": "number", "required": True, "frontendType": "number",
"description": t("Redmine-Ticket-ID")},
],
@@ -30,8 +42,7 @@ REDMINE_NODES = [
"label": t("Tickets auflisten"),
"description": t("Tickets aus dem lokalen Mirror mit Filtern (Tracker, Status, Zeitraum, Zuweisung)."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "trackerIds", "type": "string", "required": False, "frontendType": "text",
"description": t("Tracker-IDs (Komma-separiert)"), "default": ""},
{"name": "status", "type": "string", "required": False, "frontendType": "text",
@@ -59,8 +70,7 @@ REDMINE_NODES = [
"label": t("Ticket erstellen"),
"description": t("Neues Ticket in Redmine anlegen. Mirror wird sofort aktualisiert."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "subject", "type": "string", "required": True, "frontendType": "text",
"description": t("Ticket-Titel")},
{"name": "trackerId", "type": "number", "required": True, "frontendType": "number",
@@ -92,8 +102,7 @@ REDMINE_NODES = [
"label": t("Ticket bearbeiten"),
"description": t("Felder eines Redmine-Tickets aktualisieren. Nur gesetzte Felder werden uebertragen."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "ticketId", "type": "number", "required": True, "frontendType": "number",
"description": t("Ticket-ID")},
{"name": "subject", "type": "string", "required": False, "frontendType": "text",
@@ -129,8 +138,7 @@ REDMINE_NODES = [
"label": t("Statistik laden"),
"description": t("Aggregierte Kennzahlen (KPIs, Durchsatz, Status-Verteilung, Backlog) aus dem Mirror."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date",
"description": t("Zeitraum ab")},
{"name": "dateTo", "type": "string", "required": False, "frontendType": "date",
@@ -154,8 +162,7 @@ REDMINE_NODES = [
"label": t("Mirror synchronisieren"),
"description": t("Tickets und Beziehungen aus Redmine in den lokalen Mirror uebernehmen."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Redmine Feature-Instanz-ID")},
+ dict(_REDMINE_INSTANCE_PARAM),
{"name": "force", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": t("Vollsync erzwingen (ignoriert lastSyncAt)"), "default": False},
],
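Each node definition takes dict(_REDMINE_INSTANCE_PARAM) rather than the shared constant, so per-node tweaks to top-level keys cannot leak between definitions. Note that dict() is a shallow copy, as this sketch shows; the nested frontendOptions dict is still shared:

a = dict(_REDMINE_INSTANCE_PARAM)
b = dict(_REDMINE_INSTANCE_PARAM)

a["required"] = False  # top-level key: isolated per copy
assert _REDMINE_INSTANCE_PARAM["required"] is True

a["frontendOptions"]["featureCode"] = "x"  # nested dict: shared across copies!
assert b["frontendOptions"]["featureCode"] == "x"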
diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
index 5f7de2b2..0a8e7cd7 100644
--- a/modules/features/graphicalEditor/nodeDefinitions/trustee.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
@@ -3,6 +3,20 @@
from modules.shared.i18nRegistry import t
+# Typed FeatureInstance binding (replaces legacy `string, hidden`).
+# - type uses the discriminator notation `FeatureInstanceRef[<featureCode>]` so the
+# DataPicker / RequiredAttributePicker can filter compatible upstream paths.
+# - frontendType "featureInstance" is rendered by FeatureInstancePicker which
+# loads /options/feature.instance?featureCode=trustee for the current mandate.
+_TRUSTEE_INSTANCE_PARAM = {
+ "name": "featureInstanceId",
+ "type": "FeatureInstanceRef[trustee]",
+ "required": True,
+ "frontendType": "featureInstance",
+ "frontendOptions": {"featureCode": "trustee"},
+ "description": t("Trustee-Mandant"),
+}
+
TRUSTEE_NODES = [
{
"id": "trustee.refreshAccountingData",
@@ -10,8 +24,7 @@ TRUSTEE_NODES = [
"label": t("Buchhaltungsdaten aktualisieren"),
"description": t("Buchhaltungsdaten aus externem System importieren/aktualisieren."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Trustee Feature-Instanz-ID")},
+ dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": t("Import erzwingen"), "default": False},
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date",
@@ -39,8 +52,7 @@ TRUSTEE_NODES = [
{"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": t("SharePoint-Ordnerpfad"), "default": ""},
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Trustee Feature-Instanz-ID")},
+ dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "prompt", "type": "string", "required": False, "frontendType": "textarea",
"description": t("AI-Prompt für Extraktion"), "default": ""},
],
@@ -62,12 +74,11 @@ TRUSTEE_NODES = [
"description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."),
"parameters": [
# Type matches what producers actually emit: ActionResult.documents
- # is `List[ActionDocument]` (see datamodelChat.ActionResult). The
+ # is List[ActionDocument] (see datamodelChat.ActionResult). The
# DataPicker uses this string to filter compatible upstream paths.
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
- "description": t("Dokumentenliste eines Upstream-Producers (z.B. trustee.extractFromFiles → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Trustee Feature-Instanz-ID")},
+ "description": t("Dokumentenliste — gebunden via DataRef.")},
+ dict(_TRUSTEE_INSTANCE_PARAM),
],
"inputs": 1,
"outputs": 1,
@@ -83,13 +94,9 @@ TRUSTEE_NODES = [
"label": t("In Buchhaltung synchronisieren"),
"description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
"parameters": [
- # Type matches what producers actually emit: ActionResult.documents
- # is `List[ActionDocument]` (see datamodelChat.ActionResult). The
- # DataPicker uses this string to filter compatible upstream paths.
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
- "description": t("Verarbeitete Dokumentenliste eines Upstream-Producers (z.B. trustee.processDocuments → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Trustee Feature-Instanz-ID")},
+ "description": t("Verarbeitete Dokumentenliste — gebunden via DataRef.")},
+ dict(_TRUSTEE_INSTANCE_PARAM),
],
"inputs": 1,
"outputs": 1,
@@ -105,8 +112,7 @@ TRUSTEE_NODES = [
"label": t("Treuhand-Daten abfragen"),
"description": t("Daten aus der Trustee-DB lesen (Lookup, Aggregation, Roh-Export). Pendant zu refreshAccountingData ohne externen Sync."),
"parameters": [
- {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
- "description": t("Trustee Feature-Instanz-ID")},
+ dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "mode", "type": "string", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["lookup", "raw", "aggregate"]},
"description": t("Abfragemodus"), "default": "lookup"},
diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py
index dd302282..632e98fc 100644
--- a/modules/features/graphicalEditor/nodeRegistry.py
+++ b/modules/features/graphicalEditor/nodeRegistry.py
@@ -9,7 +9,7 @@ import logging
from typing import Dict, List, Any, Optional
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
-from modules.features.graphicalEditor.nodeAdapter import _bindsActionFromLegacy
+from modules.features.graphicalEditor.nodeAdapter import bindsActionFromLegacy
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
@@ -50,7 +50,7 @@ def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
fields.
"""
lang = normalizePrimaryLanguageTag(language, "en")
- bindsAction = _bindsActionFromLegacy(node)
+ bindsAction = bindsActionFromLegacy(node)
out = dict(node)
for key in list(out.keys()):
if key.startswith("_"):
diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py
index b607316a..e8d5b48d 100644
--- a/modules/features/graphicalEditor/portTypes.py
+++ b/modules/features/graphicalEditor/portTypes.py
@@ -610,7 +610,7 @@ SYSTEM_VARIABLES: Dict[str, Dict[str, str]] = {
}
-def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
+def resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
"""Resolve a system variable name to its runtime value."""
from datetime import datetime, timezone
@@ -642,7 +642,7 @@ def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
# Output normalizers
# ---------------------------------------------------------------------------
-def _normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
+def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
"""
Normalize raw executor output to match the declared port schema.
Ensures _success/_error meta-fields are always present.
@@ -696,12 +696,12 @@ def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]:
# Transit helpers
# ---------------------------------------------------------------------------
-def _wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]:
+def wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]:
"""Wrap data in a Transit envelope."""
return {"_transit": True, "_meta": meta, "data": data}
-def _unwrapTransit(output: Any) -> Any:
+def unwrapTransit(output: Any) -> Any:
"""Unwrap a Transit envelope, returning the inner data."""
if isinstance(output, dict) and output.get("_transit"):
return output.get("data")
@@ -726,10 +726,10 @@ def _resolveTransitChain(
return out
sources = connectionMap.get(current, [])
if not sources:
- return _unwrapTransit(out)
+ return unwrapTransit(out)
srcId = sources[0][0] if sources else None
if not srcId:
- return _unwrapTransit(out)
+ return unwrapTransit(out)
current = srcId
return nodeOutputs.get(nodeId)
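Since the Transit helpers are public now, a minimal roundtrip sketch (payload and meta values are made up; the pass-through of unmarked values is inferred from the guard and from how _resolveTransitChain uses the helper):

    # Wrap node output in a Transit envelope, then unwrap it again.
    env = wrapTransit({"rows": [1, 2, 3]}, meta={"nodeId": "n1"})
    assert env == {"_transit": True, "_meta": {"nodeId": "n1"}, "data": {"rows": [1, 2, 3]}}
    assert unwrapTransit(env) == {"rows": [1, 2, 3]}
    # Values without the _transit marker do not pass the guard, so plain
    # node outputs survive the helper unchanged.
    assert unwrapTransit({"plain": True}) == {"plain": True}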
@@ -738,7 +738,7 @@ def _resolveTransitChain(
# Schema derivation for dynamic outputs
# ---------------------------------------------------------------------------
-def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]:
+def deriveFormPayloadSchemaFromParam(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]:
"""Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …)."""
fields_param = (node.get("parameters") or {}).get(param_key)
if not fields_param or not isinstance(fields_param, list):
@@ -776,7 +776,7 @@ def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str)
def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
"""Derive output schema from form field definitions (``parameters.fields``)."""
- return _derive_form_payload_schema_from_param(node, "fields")
+ return deriveFormPayloadSchemaFromParam(node, "fields")
def parse_graph_defined_output_schema(
@@ -796,9 +796,9 @@ def parse_graph_defined_output_schema(
schema_spec = output_port.get("schema")
if isinstance(schema_spec, dict) and schema_spec.get("kind") == "fromGraph":
param_key = str(schema_spec.get("parameter") or "fields")
- return _derive_form_payload_schema_from_param(node, param_key)
+ return deriveFormPayloadSchemaFromParam(node, param_key)
if output_port.get("dynamic") and output_port.get("deriveFrom"):
- return _derive_form_payload_schema_from_param(node, str(output_port.get("deriveFrom")))
+ return deriveFormPayloadSchemaFromParam(node, str(output_port.get("deriveFrom")))
if isinstance(schema_spec, str) and schema_spec:
return PORT_TYPE_CATALOG.get(schema_spec)
return None
diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
index 4332df50..dc136395 100644
--- a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
+++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
@@ -14,7 +14,7 @@ from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPExceptio
from fastapi.responses import JSONResponse, StreamingResponse, Response
from modules.auth import limiter, getRequestContext, RequestContext
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
-from modules.routes.routeHelpers import _applyFiltersAndSort
+from modules.routes.routeHelpers import applyFiltersAndSort
from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices
from modules.features.graphicalEditor.nodeRegistry import getNodeTypesForApi
@@ -230,6 +230,65 @@ def get_user_connection_options(
return {"options": options}
+@router.get("/{instanceId}/options/feature.instance")
+@limiter.limit("60/minute")
+def get_feature_instance_options(
+ request: Request,
+ instanceId: str = Path(..., description="GraphicalEditor feature instance ID (workflow context)"),
+ featureCode: str = Query(..., description="Feature code to filter by (e.g. 'trustee', 'redmine', 'clickup')"),
+ enabledOnly: bool = Query(True, description="If true (default), only enabled feature instances are returned"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Return mandate-scoped FeatureInstances for the given featureCode.
+
+ Used by node parameters with frontendType='featureInstance' (e.g. Trustee
+ or Redmine nodes that need to bind to a specific tenant FeatureInstance).
+ Always restricted to the calling user's mandate (derived from the workflow
+ feature instance) so the picker never leaks foreign-mandate instances.
+
+    Response: { options: [ { value: "<featureInstanceId>", label: "<label> (<featureCode>)" } ] }
+ """
+ mandateId = _validateInstanceAccess(instanceId, context)
+ if not context.user:
+ raise HTTPException(status_code=401, detail=routeApiMsg("Authentication required"))
+ code = (featureCode or "").strip().lower()
+ if not code:
+ raise HTTPException(status_code=400, detail=routeApiMsg("featureCode query parameter is required"))
+ if not mandateId:
+ return {"options": []}
+
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ rootInterface = getRootInterface()
+ try:
+ instances = rootInterface.getFeatureInstancesByMandate(
+ mandateId, enabledOnly=bool(enabledOnly)
+ ) or []
+ except Exception as e:
+ logger.error(
+ "get_feature_instance_options: failed to load instances mandateId=%s: %s",
+ mandateId, e, exc_info=True,
+ )
+ return {"options": []}
+
+ options: List[Dict[str, str]] = []
+ for fi in instances:
+ fiCode = (getattr(fi, "featureCode", "") or "").strip().lower()
+ if fiCode != code:
+ continue
+ fiId = str(getattr(fi, "id", "") or "")
+ if not fiId:
+ continue
+ rawLabel = getattr(fi, "label", None) or getattr(fi, "name", None) or fiId
+ options.append({"value": fiId, "label": f"{rawLabel} ({fiCode})"})
+
+ logger.info(
+ "graphicalEditor feature.instance options: instanceId=%s mandateId=%s "
+ "featureCode=%s enabledOnly=%s -> %d options",
+ instanceId, mandateId, code, enabledOnly, len(options),
+ )
+ return {"options": options}
+
+
@router.post("/{instanceId}/execute")
@limiter.limit("30/minute")
async def post_execute(
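A sketch of how a client might exercise the new options endpoint; the route prefix, host, and auth header are assumptions, while the path and query parameters come from the handler above:

    import requests

    base = "https://app.example.test/api/graphical-editor"   # assumed route prefix
    resp = requests.get(
        f"{base}/wf-instance-123/options/feature.instance",  # workflow instance ID (made up)
        params={"featureCode": "trustee", "enabledOnly": "true"},
        headers={"Authorization": "Bearer <token>"},
        timeout=10,
    )
    for opt in resp.json().get("options", []):
        print(opt["value"], "->", opt["label"])   # e.g. fi-42 -> HappyLife AG (trustee)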
@@ -474,6 +533,10 @@ def get_templates(
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
templates = iface.getTemplates(scope=scope)
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
+ enrichRowsWithFkLabels(templates, AutoWorkflow)
+
paginationParams = None
if pagination:
try:
@@ -485,7 +548,7 @@ def get_templates(
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
- filtered = _applyFiltersAndSort(templates, paginationParams)
+ filtered = applyFiltersAndSort(templates, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
@@ -906,15 +969,15 @@ async def _runEditorAgent(
enrichedPrompt = prompt
if dataSourceIds:
- from modules.features.workspace.routeFeatureWorkspace import _buildDataSourceContext
+ from modules.features.workspace.routeFeatureWorkspace import buildDataSourceContext
chatSvc = getService("chat", ctx)
- dsInfo = _buildDataSourceContext(chatSvc, dataSourceIds)
+ dsInfo = buildDataSourceContext(chatSvc, dataSourceIds)
if dsInfo:
enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}"
if featureDataSourceIds:
- from modules.features.workspace.routeFeatureWorkspace import _buildFeatureDataSourceContext
- fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds)
+ from modules.features.workspace.routeFeatureWorkspace import buildFeatureDataSourceContext
+ fdsInfo = buildFeatureDataSourceContext(featureDataSourceIds)
if fdsInfo:
enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}"
@@ -1224,7 +1287,7 @@ def get_workflows(
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
- filtered = _applyFiltersAndSort(enriched, paginationParams)
+ filtered = applyFiltersAndSort(enriched, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
diff --git a/modules/features/redmine/serviceRedmine.py b/modules/features/redmine/serviceRedmine.py
index e244bd84..f0cfbfb4 100644
--- a/modules/features/redmine/serviceRedmine.py
+++ b/modules/features/redmine/serviceRedmine.py
@@ -48,7 +48,7 @@ from modules.features.redmine.interfaceFeatureRedmine import (
RedmineObjects,
getInterface,
)
-from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache
+from modules.features.redmine.serviceRedmineStatsCache import getStatsCache
logger = logging.getLogger(__name__)
@@ -334,7 +334,7 @@ def getTicket(
def _invalidateCache(featureInstanceId: str) -> None:
try:
- _getStatsCache().invalidateInstance(featureInstanceId)
+ getStatsCache().invalidateInstance(featureInstanceId)
except Exception as e:
logger.warning(f"Failed to invalidate stats cache for {featureInstanceId}: {e}")
diff --git a/modules/features/redmine/serviceRedmineStats.py b/modules/features/redmine/serviceRedmineStats.py
index 2cfed27c..33a83aa7 100644
--- a/modules/features/redmine/serviceRedmineStats.py
+++ b/modules/features/redmine/serviceRedmineStats.py
@@ -38,7 +38,7 @@ from modules.features.redmine.datamodelRedmine import (
RedmineThroughputBucket,
RedmineTicketDto,
)
-from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache
+from modules.features.redmine.serviceRedmineStatsCache import getStatsCache
logger = logging.getLogger(__name__)
@@ -69,7 +69,7 @@ async def getStats(
if status_norm not in {"*", "open", "closed"}:
status_norm = "*"
- cache = _getStatsCache()
+ cache = getStatsCache()
# Cache key now includes the new dimensions so different filter combos
# don't collide. ``_freeze`` (in the cache module) hashes lists/sets
# for us, so we can pass them directly as extra dimensions.
diff --git a/modules/features/redmine/serviceRedmineStatsCache.py b/modules/features/redmine/serviceRedmineStatsCache.py
index 46ad9372..12176178 100644
--- a/modules/features/redmine/serviceRedmineStatsCache.py
+++ b/modules/features/redmine/serviceRedmineStatsCache.py
@@ -123,7 +123,7 @@ class RedmineStatsCache:
_globalCache: Optional[RedmineStatsCache] = None
-def _getStatsCache() -> RedmineStatsCache:
+def getStatsCache() -> RedmineStatsCache:
"""Process-wide singleton."""
global _globalCache
if _globalCache is None:
diff --git a/modules/features/redmine/serviceRedmineSync.py b/modules/features/redmine/serviceRedmineSync.py
index 2c631630..2fd269d1 100644
--- a/modules/features/redmine/serviceRedmineSync.py
+++ b/modules/features/redmine/serviceRedmineSync.py
@@ -38,7 +38,7 @@ from modules.features.redmine.datamodelRedmine import (
RedmineTicketMirror,
)
from modules.features.redmine.interfaceFeatureRedmine import getInterface
-from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache
+from modules.features.redmine.serviceRedmineStatsCache import getStatsCache
logger = logging.getLogger(__name__)
@@ -134,7 +134,7 @@ async def runSync(
durationMs=duration_ms,
lastSyncAt=now_epoch,
)
- _getStatsCache().invalidateInstance(featureInstanceId)
+ getStatsCache().invalidateInstance(featureInstanceId)
return RedmineSyncResultDto(
instanceId=featureInstanceId,
@@ -188,7 +188,7 @@ async def upsertSingleTicket(
now_epoch = time.time()
_upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch)
relations_upserted = _replaceRelations(iface, featureInstanceId, issue, now_epoch)
- _getStatsCache().invalidateInstance(featureInstanceId)
+ getStatsCache().invalidateInstance(featureInstanceId)
return relations_upserted
@@ -202,7 +202,7 @@ def deleteMirroredTicket(
iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
deleted = iface.deleteMirroredTicket(featureInstanceId, int(issueId))
iface.deleteMirroredRelationsForIssue(featureInstanceId, int(issueId))
- _getStatsCache().invalidateInstance(featureInstanceId)
+ getStatsCache().invalidateInstance(featureInstanceId)
return deleted
diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py
index 37cb2d77..3368f9fc 100644
--- a/modules/features/teamsbot/routeFeatureTeamsbot.py
+++ b/modules/features/teamsbot/routeFeatureTeamsbot.py
@@ -383,7 +383,7 @@ async def streamSession(
async def _eventGenerator():
"""Generate SSE events from the session event queue."""
- from .service import _sessionEvents
+ from .service import sessionEvents
# Send initial session state
yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n"
@@ -394,10 +394,10 @@ async def streamSession(
yield f"data: {json.dumps({'type': 'botConnectionState', 'data': {'connected': _getActiveService(sessionId) is not None}})}\n\n"
# Stream events
- eventQueue = _sessionEvents.get(sessionId)
+ eventQueue = sessionEvents.get(sessionId)
if not eventQueue:
- _sessionEvents[sessionId] = asyncio.Queue()
- eventQueue = _sessionEvents[sessionId]
+ sessionEvents[sessionId] = asyncio.Queue()
+ eventQueue = sessionEvents[sessionId]
try:
while True:
@@ -810,8 +810,8 @@ async def deleteUserAccount(
# MFA Code Submission (relayed to active bot session)
# =========================================================================
-_mfaCodeQueues: dict = {}
-_mfaWaitTasks: dict = {}
+mfaCodeQueues: dict = {}
+mfaWaitTasks: dict = {}
@router.post("/{instanceId}/sessions/{sessionId}/mfa")
@limiter.limit("10/minute")
@@ -834,7 +834,7 @@ async def submitMfaCode(
logger.info(f"MFA submission for session {sessionId}: action={mfaAction}, codeLen={len(mfaCode)}")
- queue = _mfaCodeQueues.get(sessionId)
+ queue = mfaCodeQueues.get(sessionId)
if queue:
await queue.put({"action": mfaAction, "code": mfaCode})
return {"submitted": True}
@@ -981,7 +981,7 @@ async def testVoice(
):
"""Test TTS voice with AI-generated sample text in the correct language."""
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- from .service import _createAiService
+ from .service import createAiService
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
mandateId = _validateInstanceAccess(instanceId, context)
@@ -992,7 +992,7 @@ async def testVoice(
botName = body.get("botName", "AI Assistant")
try:
- aiService = _createAiService(context.user, mandateId, instanceId)
+ aiService = createAiService(context.user, mandateId, instanceId)
await aiService.ensureAiObjectsInitialized()
aiRequest = AiCallRequest(
diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py
index 2067a7f2..6d9df074 100644
--- a/modules/features/teamsbot/service.py
+++ b/modules/features/teamsbot/service.py
@@ -532,7 +532,7 @@ def getActiveService(sessionId: str) -> Optional["TeamsbotService"]:
# AI Service Factory (for billing-aware AI calls)
# =========================================================================
-def _createAiService(user, mandateId, featureInstanceId=None):
+def createAiService(user, mandateId, featureInstanceId=None):
"""Create a properly wired AiService via the service center."""
ctx = ServiceCenterContext(
user=user,
@@ -546,15 +546,15 @@ def _createAiService(user, mandateId, featureInstanceId=None):
# =========================================================================
# Session Event Queues (for SSE streaming to frontend)
# =========================================================================
-_sessionEvents: Dict[str, asyncio.Queue] = {}
+sessionEvents: Dict[str, asyncio.Queue] = {}
async def _emitSessionEvent(sessionId: str, eventType: str, data: Any):
"""Emit an event to the session's SSE stream.
Creates the queue on-demand so events are never silently dropped."""
- if sessionId not in _sessionEvents:
- _sessionEvents[sessionId] = asyncio.Queue()
- await _sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()})
+ if sessionId not in sessionEvents:
+ sessionEvents[sessionId] = asyncio.Queue()
+ await sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()})
def _normalizeGatewayHostForBotWs(host: str) -> str:
@@ -709,7 +709,7 @@ class TeamsbotService:
interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId)
# Initialize SSE event queue
- _sessionEvents[sessionId] = asyncio.Queue()
+ sessionEvents[sessionId] = asyncio.Queue()
try:
# Update status to JOINING
@@ -798,7 +798,7 @@ class TeamsbotService:
})
# Cleanup event queue
- _sessionEvents.pop(sessionId, None)
+ sessionEvents.pop(sessionId, None)
# =========================================================================
# Browser Bot WebSocket Communication
@@ -1048,9 +1048,9 @@ class TeamsbotService:
"timestamp": getIsoTimestamp(),
})
- from .routeFeatureTeamsbot import _mfaCodeQueues, _mfaWaitTasks
+ from .routeFeatureTeamsbot import mfaCodeQueues, mfaWaitTasks
mfaQueue = asyncio.Queue()
- _mfaCodeQueues[sessionId] = mfaQueue
+ mfaCodeQueues[sessionId] = mfaQueue
async def _waitAndForwardMfa(sid, queue, ws):
try:
@@ -1075,10 +1075,10 @@ class TeamsbotService:
except asyncio.CancelledError:
logger.info(f"[WS] MFA wait cancelled for session {sid} (resolved via page)")
finally:
- _mfaCodeQueues.pop(sid, None)
- _mfaWaitTasks.pop(sid, None)
+ mfaCodeQueues.pop(sid, None)
+ mfaWaitTasks.pop(sid, None)
- _mfaWaitTasks[sessionId] = asyncio.create_task(
+ mfaWaitTasks[sessionId] = asyncio.create_task(
_waitAndForwardMfa(sessionId, mfaQueue, websocket)
)
@@ -1100,11 +1100,11 @@ class TeamsbotService:
elif msgType == "mfaResolved":
success = message.get("success", False)
logger.info(f"[WS] MFA resolved: success={success}")
- from .routeFeatureTeamsbot import _mfaCodeQueues, _mfaWaitTasks
- task = _mfaWaitTasks.pop(sessionId, None)
+ from .routeFeatureTeamsbot import mfaCodeQueues, mfaWaitTasks
+ task = mfaWaitTasks.pop(sessionId, None)
if task and not task.done():
task.cancel()
- _mfaCodeQueues.pop(sessionId, None)
+ mfaCodeQueues.pop(sessionId, None)
await _emitSessionEvent(sessionId, "mfaResolved", {
"success": success,
"timestamp": getIsoTimestamp(),
@@ -1844,7 +1844,7 @@ class TeamsbotService:
)
try:
- aiService = _createAiService(
+ aiService = createAiService(
self.currentUser, self.mandateId, self.instanceId
)
await aiService.ensureAiObjectsInitialized()
@@ -1976,7 +1976,7 @@ class TeamsbotService:
)
try:
- aiService = _createAiService(
+ aiService = createAiService(
self.currentUser, self.mandateId, self.instanceId
)
await aiService.ensureAiObjectsInitialized()
@@ -2195,7 +2195,7 @@ class TeamsbotService:
# Call SPEECH_TEAMS
try:
- aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId)
+ aiService = createAiService(self.currentUser, self.mandateId, self.instanceId)
await aiService.ensureAiObjectsInitialized()
request = AiCallRequest(
@@ -3767,7 +3767,7 @@ class TeamsbotService:
)
try:
- aiService = _createAiService(
+ aiService = createAiService(
self.currentUser, self.mandateId, self.instanceId
)
await aiService.ensureAiObjectsInitialized()
@@ -3930,7 +3930,7 @@ class TeamsbotService:
"""Summarize a long user-provided session context to its essential points.
This reduces token usage in every subsequent AI call."""
try:
- aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId)
+ aiService = createAiService(self.currentUser, self.mandateId, self.instanceId)
await aiService.ensureAiObjectsInitialized()
request = AiCallRequest(
@@ -3980,7 +3980,7 @@ class TeamsbotService:
lines.append(f"[{speaker}]: {text}")
textToSummarize = "\n".join(lines)
- aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId)
+ aiService = createAiService(self.currentUser, self.mandateId, self.instanceId)
await aiService.ensureAiObjectsInitialized()
request = AiCallRequest(
@@ -4021,7 +4021,7 @@ class TeamsbotService:
for t in transcripts
)
- aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId)
+ aiService = createAiService(self.currentUser, self.mandateId, self.instanceId)
await aiService.ensureAiObjectsInitialized()
request = AiCallRequest(
diff --git a/modules/features/trustee/accounting/accountingBridge.py b/modules/features/trustee/accounting/accountingBridge.py
index b91cd83e..2a267b73 100644
--- a/modules/features/trustee/accounting/accountingBridge.py
+++ b/modules/features/trustee/accounting/accountingBridge.py
@@ -16,7 +16,7 @@ from .accountingConnectorBase import (
AccountingChart,
SyncResult,
)
-from .accountingRegistry import _getAccountingRegistry
+from .accountingRegistry import getAccountingRegistry
logger = logging.getLogger(__name__)
@@ -26,7 +26,7 @@ class AccountingBridge:
def __init__(self, trusteeInterface):
self._trusteeInterface = trusteeInterface
- self._registry = _getAccountingRegistry()
+ self._registry = getAccountingRegistry()
async def getActiveConfig(self, featureInstanceId: str) -> Optional[Dict[str, Any]]:
"""Load the active TrusteeAccountingConfig for a feature instance."""
diff --git a/modules/features/trustee/accounting/accountingConnectorBase.py b/modules/features/trustee/accounting/accountingConnectorBase.py
index c5124184..5d76c997 100644
--- a/modules/features/trustee/accounting/accountingConnectorBase.py
+++ b/modules/features/trustee/accounting/accountingConnectorBase.py
@@ -39,6 +39,26 @@ class AccountingChart(BaseModel):
accountType: Optional[str] = None
+class AccountingPeriodBalance(BaseModel):
+ """Balance snapshot for one account in one period.
+
+ Mirrors the `TrusteeDataAccountBalance` table 1:1 so
+ `accountingDataSync._persistBalances` can persist connector output without
+ re-mapping. `closingBalance` is always the *cumulative* balance at the end
+ of the period (NOT the period's net movement). `periodMonth=0` denotes the
+ annual bucket (closing balance per fiscal year-end).
+ """
+ accountNumber: str
+ periodYear: int
+ periodMonth: int = 0
+ openingBalance: float = 0.0
+ debitTotal: float = 0.0
+ creditTotal: float = 0.0
+ closingBalance: float = 0.0
+ currency: str = "CHF"
+ asOfDate: Optional[str] = None
+
+
class SyncResult(BaseModel):
"""Result of a sync operation."""
success: bool
@@ -126,6 +146,31 @@ class BaseAccountingConnector(ABC):
accountNumbers: pre-fetched account numbers (avoids redundant API call). Override in connectors that support it."""
return []
+ async def getAccountBalances(
+ self,
+ config: Dict[str, Any],
+ years: List[int],
+ accountNumbers: Optional[List[str]] = None,
+ ) -> List[AccountingPeriodBalance]:
+ """Read closing balances per account and period from the external system.
+
+ Contract:
+ - One row per (accountNumber, periodYear, periodMonth).
+ - `periodMonth=0` => annual bucket (closing balance per fiscal year-end).
+ - `periodMonth=1..12` => closing balance per end of that calendar month.
+ - `closingBalance` MUST be the *cumulative* balance at period end,
+ including all prior-year carry-over and yearend bookings -- NOT the
+ period's net movement.
+ - `openingBalance` MUST be the cumulative balance at period start
+ (= previous period's closingBalance).
+
+ Default returns []; `AccountingDataSync` will then fall back to a
+ local cumulative aggregation from journal lines. Override in
+ connectors that can fetch authoritative balances from the source
+ system (e.g. RMA `/gl/saldo`).
+ """
+ return []
+
async def uploadDocument(
self,
config: Dict[str, Any],
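To make the cumulative contract of AccountingPeriodBalance concrete, two sketch rows for one account under assumed numbers (opening 100 carried into the year, a single 80/30 movement in February, no other bookings):

    # Monthly bucket: closing is the cumulative balance at month end,
    # not the month's net movement.
    feb = AccountingPeriodBalance(
        accountNumber="1020", periodYear=2025, periodMonth=2,
        openingBalance=100.0,            # = January's closingBalance
        debitTotal=80.0, creditTotal=30.0,
        closingBalance=150.0,            # 100 + 80 - 30, cumulative
        asOfDate="2025-02-28",
    )
    # Annual bucket: periodMonth=0, closing at fiscal year-end.
    year = AccountingPeriodBalance(
        accountNumber="1020", periodYear=2025, periodMonth=0,
        openingBalance=100.0, debitTotal=80.0, creditTotal=30.0,
        closingBalance=150.0, asOfDate="2025-12-31",
    )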
diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py
index ef8789ea..0770ead5 100644
--- a/modules/features/trustee/accounting/accountingDataSync.py
+++ b/modules/features/trustee/accounting/accountingDataSync.py
@@ -25,7 +25,7 @@ from pathlib import Path
from typing import Callable, Dict, Any, List, Optional, Type
from .accountingConnectorBase import BaseAccountingConnector
-from .accountingRegistry import _getAccountingRegistry
+from .accountingRegistry import getAccountingRegistry
logger = logging.getLogger(__name__)
@@ -33,6 +33,72 @@ logger = logging.getLogger(__name__)
_HEARTBEAT_EVERY = 500
+def _isIncomeStatementAccount(accountNumber: str) -> bool:
+ """Swiss KMU-Kontenrahmen heuristic: 1xxx + 2xxx -> balance sheet
+ (cumulative carry-over across years); 3xxx..9xxx -> income statement
+ (reset to 0 at fiscal-year start). Used by the local fallback only;
+ when a connector returns balances, those values are used verbatim.
+ """
+ a = (accountNumber or "").strip()
+ if not a or not a[0].isdigit():
+ return False
+ return a[0] not in ("1", "2")
+
+
+def _resolveBalanceYears(
+ dateFrom: Optional[str],
+ dateTo: Optional[str],
+ oldestBookingDate: Optional[str],
+ newestBookingDate: Optional[str],
+) -> List[int]:
+ """Derive the list of years for which the connector should compute balances.
+
+ Prefers the ``dateFrom``/``dateTo`` import window the user requested. Falls
+ back to the actual oldest/newest booking date observed in the imported
+ journal (so e.g. a `dateTo=None` import still produces balances for every
+ year that has data). If nothing is known, returns the current year as a
+ sensible default.
+ """
+ def _yearOf(s: Optional[str]) -> Optional[int]:
+ if not s:
+ return None
+ try:
+ return int(str(s)[:4])
+ except (TypeError, ValueError):
+ return None
+
+ fromYear = _yearOf(dateFrom) or _yearOf(oldestBookingDate)
+ toYear = _yearOf(dateTo) or _yearOf(newestBookingDate)
+ if fromYear is None and toYear is None:
+ return [time.gmtime().tm_year]
+ if fromYear is None:
+ fromYear = toYear
+ if toYear is None:
+ toYear = fromYear
+ if toYear < fromYear:
+ fromYear, toYear = toYear, fromYear
+ return list(range(fromYear, toYear + 1))
+
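Worked examples of the year-window derivation (dates made up):

    _resolveBalanceYears("2023-05-01", "2025-02-28", None, None)   # [2023, 2024, 2025]
    _resolveBalanceYears(None, None, "2022-11-03", "2024-01-15")   # [2022, 2023, 2024]
    _resolveBalanceYears("2024-01-01", None, None, None)           # [2024]
    _resolveBalanceYears(None, None, None, None)                   # [<current year>]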
+
+def _balanceModelToRow(b: Any, scope: Dict[str, Any]) -> Dict[str, Any]:
+ """Map an ``AccountingPeriodBalance`` (or compatible dict) to a DB row."""
+ if isinstance(b, dict):
+ get = b.get
+ else:
+ get = lambda k, default=None: getattr(b, k, default)
+ return {
+ "accountNumber": str(get("accountNumber", "") or ""),
+ "periodYear": int(get("periodYear", 0) or 0),
+ "periodMonth": int(get("periodMonth", 0) or 0),
+ "openingBalance": round(float(get("openingBalance", 0) or 0), 2),
+ "debitTotal": round(float(get("debitTotal", 0) or 0), 2),
+ "creditTotal": round(float(get("creditTotal", 0) or 0), 2),
+ "closingBalance": round(float(get("closingBalance", 0) or 0), 2),
+ "currency": str(get("currency", "CHF") or "CHF"),
+ **scope,
+ }
+
+
def _isDebugDumpEnabled() -> bool:
"""Whether to write raw connector payloads to disk for offline inspection.
@@ -101,7 +167,7 @@ class AccountingDataSync:
def __init__(self, trusteeInterface):
self._if = trusteeInterface
- self._registry = _getAccountingRegistry()
+ self._registry = getAccountingRegistry()
async def importData(
self,
@@ -246,18 +312,39 @@ class AccountingDataSync:
logger.error(f"Import contacts failed: {e}", exc_info=True)
summary["errors"].append(f"Contacts: {e}")
- # ---- Phase 4: Compute account balances ----
- # Progress budget: 90-95 %. Pure DB aggregation, no external calls.
+ # ---- Phase 4: Account balances ----
+ # Progress budget: 88-95 %. Connector first (RMA -> /gl/saldo, Bexio
+ # & Abacus -> aggregated journal). On empty/failed connector output
+ # we fall back to a *correct* cumulative aggregation from the
+ # journal lines we just persisted.
+ connectorBalances: list = []
+ balanceSource = "local-fallback"
try:
- _progress(90, "Berechne Kontensaldi...")
+ _progress(88, "Lade Kontensaldi vom Buchhaltungssystem...")
+ balanceYears = _resolveBalanceYears(dateFrom, dateTo, summary.get("oldestBookingDate"), summary.get("newestBookingDate"))
+ connectorBalances = await connector.getAccountBalances(
+ connConfig,
+ years=balanceYears,
+ accountNumbers=fetchedAccountNumbers or None,
+ )
+ _dumpSyncData("accountBalances", connectorBalances)
+ if connectorBalances:
+ balanceSource = "connector"
+ except Exception as e:
+ logger.warning(f"Connector getAccountBalances failed, will use local fallback: {e}", exc_info=True)
+ summary["errors"].append(f"Balances connector: {e}")
+
+ try:
+ _progress(92, "Speichere Kontensaldi...")
balanceCount = await asyncio.to_thread(
self._persistBalances, featureInstanceId, mandateId,
TrusteeDataJournalEntry, TrusteeDataJournalLine, TrusteeDataAccountBalance,
+ connectorBalances, balanceSource,
)
summary["accountBalances"] = balanceCount
- _progress(95, f"{balanceCount} Saldi berechnet.")
+ _progress(95, f"{balanceCount} Saldi gespeichert (source={balanceSource}).")
except Exception as e:
- logger.error(f"Compute balances failed: {e}", exc_info=True)
+ logger.error(f"Persist balances failed: {e}", exc_info=True)
summary["errors"].append(f"Balances: {e}")
cfgId = cfgRecord.get("id")
@@ -401,12 +488,66 @@ class AccountingDataSync:
logger.info(f"Persisted {n} contacts for {featureInstanceId} in {time.time() - t0:.1f}s")
return n
- def _persistBalances(self, featureInstanceId: str, mandateId: str,
- modelEntry: Type, modelLine: Type, modelBalance: Type) -> int:
- """Re-aggregate journal lines into monthly + annual balances."""
+ def _persistBalances(
+ self,
+ featureInstanceId: str,
+ mandateId: str,
+ modelEntry: Type,
+ modelLine: Type,
+ modelBalance: Type,
+ connectorBalances: list,
+ source: str,
+ ) -> int:
+ """Persist account balances per (account, period) into ``TrusteeDataAccountBalance``.
+
+ Source of truth (``source="connector"``): the list returned by
+ ``BaseAccountingConnector.getAccountBalances`` is persisted 1:1.
+
+ Fallback (``source="local-fallback"``): aggregate the just-persisted
+ journal lines into **cumulative** balances. Unlike the previous
+ implementation, this version (a) carries the cumulative balance
+ forward across months/years for balance-sheet accounts, (b) resets
+ income-statement accounts at fiscal-year start, and (c) computes
+ ``openingBalance`` correctly as the previous period's
+ ``closingBalance``. ``openingBalance`` of the very first imported
+ period stays at 0 (no prior data available -- by design; see plan
+ document for rationale).
+ """
t0 = time.time()
self._bulkClear(modelBalance, featureInstanceId)
+ scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId}
+ if connectorBalances:
+ rows = [_balanceModelToRow(b, scope) for b in connectorBalances]
+ n = self._bulkCreate(modelBalance, rows)
+ logger.info(
+ f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s "
+ f"(source={source})"
+ )
+ return n
+
+ rows = self._buildLocalBalanceFallback(featureInstanceId, modelEntry, modelLine, scope)
+ n = self._bulkCreate(modelBalance, rows)
+ logger.info(
+ f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s "
+ f"(source={source})"
+ )
+ return n
+
+ def _buildLocalBalanceFallback(
+ self,
+ featureInstanceId: str,
+ modelEntry: Type,
+ modelLine: Type,
+ scope: Dict[str, Any],
+ ) -> List[Dict[str, Any]]:
+ """Aggregate ``TrusteeDataJournalLine`` rows into cumulative period balances.
+
+ Returns rows ready for ``_bulkCreate``. Walks every account
+ chronologically through all years observed in the journal so the
+ cumulative balance and per-period opening are exact (within the
+ bounds of the imported window).
+ """
entries = self._if.db.getRecordset(
modelEntry, recordFilter={"featureInstanceId": featureInstanceId},
) or []
@@ -421,7 +562,9 @@ class AccountingDataSync:
modelLine, recordFilter={"featureInstanceId": featureInstanceId},
) or []
- buckets: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0})
+ movements: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0})
+ observedYears: set = set()
+ observedAccounts: set = set()
for ln in lines:
if isinstance(ln, dict):
jeid = ln.get("journalEntryId", "")
@@ -437,7 +580,7 @@ class AccountingDataSync:
bdate = entryDates.get(jeid, "")
if not accNo or not bdate:
continue
- parts = bdate.split("-")
+ parts = str(bdate).split("-")
if len(parts) < 2:
continue
try:
@@ -445,29 +588,56 @@ class AccountingDataSync:
month = int(parts[1])
except ValueError:
continue
+ movements[(accNo, year, month)]["debit"] += debit
+ movements[(accNo, year, month)]["credit"] += credit
+ observedYears.add(year)
+ observedAccounts.add(accNo)
- buckets[(accNo, year, month)]["debit"] += debit
- buckets[(accNo, year, month)]["credit"] += credit
- buckets[(accNo, year, 0)]["debit"] += debit
- buckets[(accNo, year, 0)]["credit"] += credit
+ if not observedYears or not observedAccounts:
+ return []
- scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId}
- rows = [{
- "accountNumber": accNo,
- "periodYear": year,
- "periodMonth": month,
- "openingBalance": 0.0,
- "debitTotal": round(totals["debit"], 2),
- "creditTotal": round(totals["credit"], 2),
- "closingBalance": round(totals["debit"] - totals["credit"], 2),
- "currency": "CHF",
- **scope,
- } for (accNo, year, month), totals in buckets.items()]
- n = self._bulkCreate(modelBalance, rows)
- logger.info(
- f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s"
- )
- return n
+ sortedYears = sorted(observedYears)
+ rows: List[Dict[str, Any]] = []
+ for accNo in sorted(observedAccounts):
+ isER = _isIncomeStatementAccount(accNo)
+ cumulativeOpeningOfYear = 0.0
+ for year in sortedYears:
+ yearOpening = 0.0 if isER else cumulativeOpeningOfYear
+ running = yearOpening
+ yearDebit = 0.0
+ yearCredit = 0.0
+ for month in range(1, 13):
+ opening = running
+ mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0})
+ running = opening + mov["debit"] - mov["credit"]
+ yearDebit += mov["debit"]
+ yearCredit += mov["credit"]
+ if mov["debit"] == 0 and mov["credit"] == 0 and opening == 0 and running == 0:
+ continue
+ rows.append({
+ "accountNumber": accNo,
+ "periodYear": year,
+ "periodMonth": month,
+ "openingBalance": round(opening, 2),
+ "debitTotal": round(mov["debit"], 2),
+ "creditTotal": round(mov["credit"], 2),
+ "closingBalance": round(running, 2),
+ "currency": "CHF",
+ **scope,
+ })
+ rows.append({
+ "accountNumber": accNo,
+ "periodYear": year,
+ "periodMonth": 0,
+ "openingBalance": round(yearOpening, 2),
+ "debitTotal": round(yearDebit, 2),
+ "creditTotal": round(yearCredit, 2),
+ "closingBalance": round(running, 2),
+ "currency": "CHF",
+ **scope,
+ })
+ cumulativeOpeningOfYear = running
+ return rows
# ===== Low-level bulk helpers =====
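The fallback's account classification and carry-over behaviour, spot-checked (account numbers are typical Swiss KMU examples):

    assert _isIncomeStatementAccount("1020") is False   # Aktiven -> balance sheet
    assert _isIncomeStatementAccount("2800") is False   # Eigenkapital -> balance sheet
    assert _isIncomeStatementAccount("6500") is True    # Aufwand -> income statement
    assert _isIncomeStatementAccount("A100") is False   # non-numeric -> conservative default

    # Walk, by hand: account 1020 books debit 100 in Dec 2024 and debit 50 in
    # Jan 2025 -> 2025-01 opening 100, closing 150 (carry-over). Account 6500
    # with the same movements -> 2025-01 opening 0, closing 50 (annual reset).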
diff --git a/modules/features/trustee/accounting/accountingRegistry.py b/modules/features/trustee/accounting/accountingRegistry.py
index ca5e27d9..fe1b20d5 100644
--- a/modules/features/trustee/accounting/accountingRegistry.py
+++ b/modules/features/trustee/accounting/accountingRegistry.py
@@ -74,7 +74,7 @@ class AccountingRegistry:
_registryInstance: Optional[AccountingRegistry] = None
-def _getAccountingRegistry() -> AccountingRegistry:
+def getAccountingRegistry() -> AccountingRegistry:
"""Singleton access to the accounting registry."""
global _registryInstance
if _registryInstance is None:
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
index 0269a654..e03e7df7 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
@@ -6,12 +6,22 @@ API docs: https://downloads.abacus.ch/fileadmin/ablage/abaconnect/htmlfiles/docs
Auth: OAuth 2.0 Client Credentials (Service User).
Each Abacus instance has its own host URL; there is no central cloud endpoint.
Entity API uses OData V4 format.
+
+Account balances:
+ Abacus exposes an ``AccountBalances`` entity (per fiscal year), but its
+ availability depends on the customer's Abacus license / Profile and is
+ NOT guaranteed for all instances. The robust default is therefore to
+ aggregate balances locally from ``GeneralJournalEntries`` (always
+ present). If a future iteration confirms the entity for a specific
+ instance, ``getAccountBalances`` can be extended to prefer that source
+ via a config flag (e.g. ``useAccountBalancesEntity: true``).
"""
import base64
+import calendar
import logging
import time
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Tuple
import aiohttp
@@ -19,6 +29,7 @@ from ..accountingConnectorBase import (
BaseAccountingConnector,
AccountingBooking,
AccountingChart,
+ AccountingPeriodBalance,
ConnectorConfigField,
SyncResult,
)
@@ -27,6 +38,21 @@ from modules.shared.i18nRegistry import t
logger = logging.getLogger(__name__)
+def _formatLastDayOfMonth(year: int, month: int) -> str:
+ lastDay = calendar.monthrange(year, month)[1]
+ return f"{year:04d}-{month:02d}-{lastDay:02d}"
+
+
+def _isIncomeStatementAccount(accountNumber: str) -> bool:
+ """Swiss KMU-Kontenrahmen heuristic: 1xxx + 2xxx -> balance sheet (cumulative);
+ 3xxx..9xxx -> income statement (reset per fiscal year).
+ """
+ a = (accountNumber or "").strip()
+ if not a or not a[0].isdigit():
+ return False
+ return a[0] not in ("1", "2")
+
+
class AccountingConnectorAbacus(BaseAccountingConnector):
def __init__(self):
@@ -341,3 +367,158 @@ class AccountingConnectorAbacus(BaseAccountingConnector):
except Exception as e:
logger.error(f"Abacus getVendors error: {e}")
return []
+
+ async def getAccountBalances(
+ self,
+ config: Dict[str, Any],
+ years: List[int],
+ accountNumbers: Optional[List[str]] = None,
+ ) -> List[AccountingPeriodBalance]:
+ """Aggregate account balances from ``GeneralJournalEntries`` (OData V4).
+
+ Strategy:
+ 1. Page through ``GET GeneralJournalEntries?$filter=JournalDate le YYYY-12-31``
+ until ``@odata.nextLink`` is exhausted. Including ALL prior years
+ is required to compute the carry-over for balance-sheet accounts.
+ 2. Per (account, year, month) accumulate ``DebitAmount``/``CreditAmount``
+ from ``Lines``.
+ 3. Income-statement accounts (3xxx-9xxx) reset to 0 per fiscal year;
+ balance-sheet accounts (1xxx-2xxx) carry their cumulative balance.
+
+ Optional optimization (not yet active): if the customer's Abacus
+ instance ships the ``AccountBalances`` OData entity, it can return
+ authoritative period balances directly. Detect via a probe GET on
+ ``AccountBalances?$top=1`` and prefer that source. This is intentionally
+ deferred until we hit a customer where the entity is available --
+        the local aggregation is an always-correct fallback.
+ """
+ if not years:
+ return []
+ sortedYears = sorted({int(y) for y in years if y})
+ minYear = sortedYears[0]
+ maxYear = sortedYears[-1]
+ accountNumbersSet = set(accountNumbers) if accountNumbers else None
+
+ headers = await self._buildAuthHeaders(config)
+ if not headers:
+ logger.warning("Abacus getAccountBalances: no access token, skipping")
+ return []
+
+ rawEntries = await self._fetchAllJournalEntries(config, headers, dateTo=f"{maxYear}-12-31")
+
+ movements: Dict[Tuple[str, int, int], Dict[str, float]] = {}
+ seenAccounts: set = set()
+ for entry in rawEntries:
+ dateRaw = str(entry.get("JournalDate") or "")[:10]
+ if len(dateRaw) < 7:
+ continue
+ try:
+ year = int(dateRaw[:4])
+ month = int(dateRaw[5:7])
+ except ValueError:
+ continue
+ for line in (entry.get("Lines") or []):
+ accNo = str(line.get("AccountId") or "").strip()
+ if not accNo:
+ continue
+ seenAccounts.add(accNo)
+ try:
+ debit = float(line.get("DebitAmount") or 0)
+ credit = float(line.get("CreditAmount") or 0)
+ except (TypeError, ValueError):
+ continue
+ if debit == 0 and credit == 0:
+ continue
+ bucket = movements.setdefault((accNo, year, month), {"debit": 0.0, "credit": 0.0})
+ bucket["debit"] += debit
+ bucket["credit"] += credit
+
+ results: List[AccountingPeriodBalance] = []
+ for accNo in sorted(seenAccounts):
+ if accountNumbersSet is not None and accNo not in accountNumbersSet:
+ continue
+ isER = _isIncomeStatementAccount(accNo)
+
+ preMinYearBalance = 0.0
+ if not isER:
+ for (a, yr, _mo), m in movements.items():
+ if a == accNo and yr < minYear:
+ preMinYearBalance += m["debit"] - m["credit"]
+
+ cumulativeOpeningOfYear = preMinYearBalance
+ for year in sortedYears:
+ yearOpening = 0.0 if isER else cumulativeOpeningOfYear
+ running = yearOpening
+ yearDebit = 0.0
+ yearCredit = 0.0
+ for month in range(1, 13):
+ opening = running
+ mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0})
+ running = opening + mov["debit"] - mov["credit"]
+ yearDebit += mov["debit"]
+ yearCredit += mov["credit"]
+ results.append(AccountingPeriodBalance(
+ accountNumber=accNo,
+ periodYear=year,
+ periodMonth=month,
+ openingBalance=round(opening, 2),
+ debitTotal=round(mov["debit"], 2),
+ creditTotal=round(mov["credit"], 2),
+ closingBalance=round(running, 2),
+ currency="CHF",
+ asOfDate=_formatLastDayOfMonth(year, month),
+ ))
+
+ results.append(AccountingPeriodBalance(
+ accountNumber=accNo,
+ periodYear=year,
+ periodMonth=0,
+ openingBalance=round(yearOpening, 2),
+ debitTotal=round(yearDebit, 2),
+ creditTotal=round(yearCredit, 2),
+ closingBalance=round(running, 2),
+ currency="CHF",
+ asOfDate=f"{year}-12-31",
+ ))
+
+ cumulativeOpeningOfYear = running
+
+ logger.info(
+ "Abacus getAccountBalances: %s rows from %s journal entries (years=%s)",
+ len(results), len(rawEntries), sortedYears,
+ )
+ return results
+
+ async def _fetchAllJournalEntries(
+ self,
+ config: Dict[str, Any],
+ headers: Dict[str, str],
+ dateTo: str,
+ ) -> List[Dict[str, Any]]:
+ """Page through ``GeneralJournalEntries`` (OData V4) following ``@odata.nextLink``.
+
+ We filter ``JournalDate le dateTo`` to bound the result, but include
+ ALL prior years (no lower bound) so cumulative balance-sheet
+ carry-over is correct.
+ """
+ results: List[Dict[str, Any]] = []
+ baseUrl = self._buildEntityUrl(config, f"GeneralJournalEntries?$filter=JournalDate le {dateTo}")
+ nextUrl: Optional[str] = baseUrl
+ async with aiohttp.ClientSession() as session:
+ while nextUrl:
+ try:
+ async with session.get(nextUrl, headers=headers, timeout=aiohttp.ClientTimeout(total=60)) as resp:
+ if resp.status != 200:
+ body = await resp.text()
+ logger.warning("Abacus GeneralJournalEntries HTTP %s: %s", resp.status, body[:200])
+ break
+ data = await resp.json()
+ except Exception as ex:
+ logger.warning("Abacus GeneralJournalEntries request failed: %s", ex)
+ break
+ page = data.get("value") or []
+ if not isinstance(page, list):
+ break
+ results.extend(page)
+ nextUrl = data.get("@odata.nextLink")
+ return results
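The @odata.nextLink paging contract the fetch loop depends on, as a standalone sketch with fake pages instead of HTTP:

    # Each response carries a "value" page and, while more data exists, an
    # "@odata.nextLink" to the next page; paging stops when the link is absent.
    pages = {
        "p1": {"value": [{"JournalDate": "2025-01-05"}], "@odata.nextLink": "p2"},
        "p2": {"value": [{"JournalDate": "2025-02-07"}]},   # no nextLink -> last page
    }

    def fetchAll(start: str) -> list:
        out, url = [], start
        while url:
            data = pages[url]            # stands in for the HTTP GET
            out.extend(data.get("value") or [])
            url = data.get("@odata.nextLink")
        return out

    assert len(fetchAll("p1")) == 2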
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
index dcb3233d..28c2a334 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
@@ -7,10 +7,20 @@ Auth: Personal Access Token (PAT) as Bearer token.
Base URL: https://api.bexio.com/
Note: Bexio uses internal account IDs (int), not account numbers.
The connector caches the chart of accounts to resolve accountNumber -> account_id.
+
+Account balances:
+ Bexio does NOT expose a dedicated saldo endpoint (no equivalent to RMA's
+ ``/gl/saldo``). ``getAccountBalances`` therefore aggregates balances
+ locally by paginating ``GET /3.0/accounting/journal`` (max 2000 rows per
+ page) and computing cumulative balances per (account, period). Income-
+ statement accounts (3xxx-9xxx in the Swiss KMU-Kontenrahmen) are reset
+ at the start of each fiscal year; balance-sheet accounts (1xxx-2xxx)
+ carry their cumulative balance across years.
"""
+import calendar
import logging
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Tuple
import aiohttp
@@ -18,6 +28,7 @@ from ..accountingConnectorBase import (
BaseAccountingConnector,
AccountingBooking,
AccountingChart,
+ AccountingPeriodBalance,
ConnectorConfigField,
SyncResult,
)
@@ -26,6 +37,23 @@ from modules.shared.i18nRegistry import t
logger = logging.getLogger(__name__)
_DEFAULT_API_BASE_URL = "https://api.bexio.com/"
+_JOURNAL_PAGE_SIZE = 2000
+
+
+def _formatLastDayOfMonth(year: int, month: int) -> str:
+ lastDay = calendar.monthrange(year, month)[1]
+ return f"{year:04d}-{month:02d}-{lastDay:02d}"
+
+
+def _isIncomeStatementAccount(accountNumber: str) -> bool:
+ """Swiss KMU-Kontenrahmen: 1xxx Aktiven + 2xxx Passiven -> balance sheet
+ (cumulative balance carried across years); 3xxx..9xxx -> income statement
+ (reset to 0 at fiscal-year start).
+ """
+ a = (accountNumber or "").strip()
+ if not a or not a[0].isdigit():
+ return False
+ return a[0] not in ("1", "2")
class AccountingConnectorBexio(BaseAccountingConnector):
@@ -260,3 +288,148 @@ class AccountingConnectorBexio(BaseAccountingConnector):
except Exception as e:
logger.error(f"Bexio getCustomers error: {e}")
return []
+
+ async def getAccountBalances(
+ self,
+ config: Dict[str, Any],
+ years: List[int],
+ accountNumbers: Optional[List[str]] = None,
+ ) -> List[AccountingPeriodBalance]:
+ """Aggregate account balances locally from ``/3.0/accounting/journal``.
+
+ Bexio offers no per-account saldo endpoint, so we paginate the full
+ journal up to the latest requested fiscal year-end and compute
+ opening / debit / credit / closing per (account, period). For balance-
+ sheet accounts the cumulative carry-over from prior years is included;
+ for income-statement accounts the balance is reset at the start of
+ every requested fiscal year (per Swiss accounting principles).
+ """
+ if not years:
+ return []
+ sortedYears = sorted({int(y) for y in years if y})
+ minYear = sortedYears[0]
+ maxYear = sortedYears[-1]
+ accountNumbersSet = set(accountNumbers) if accountNumbers else None
+
+ accounts = await self._loadRawAccounts(config)
+        accIdToNumber: Dict[int, str] = {
+            acc.get("id"): str(acc.get("account_no", ""))
+            for acc in accounts
+            if acc.get("id") is not None and acc.get("account_no") is not None
+        }
+ if not accIdToNumber:
+ logger.warning("Bexio getAccountBalances: chart of accounts is empty -- cannot derive balances")
+ return []
+
+ rawEntries = await self._fetchAllJournalRows(config, dateTo=f"{maxYear}-12-31")
+
+ movements: Dict[Tuple[str, int, int], Dict[str, float]] = {}
+ for e in rawEntries:
+ dateRaw = str(e.get("date") or "")[:10]
+ if len(dateRaw) < 7:
+ continue
+ try:
+ year = int(dateRaw[:4])
+ month = int(dateRaw[5:7])
+ except ValueError:
+ continue
+ try:
+ amount = float(e.get("amount") or 0)
+ except (TypeError, ValueError):
+ continue
+ if amount == 0:
+ continue
+ debitAcc = accIdToNumber.get(e.get("debit_account_id"))
+ creditAcc = accIdToNumber.get(e.get("credit_account_id"))
+ if debitAcc:
+ bucket = movements.setdefault((debitAcc, year, month), {"debit": 0.0, "credit": 0.0})
+ bucket["debit"] += amount
+ if creditAcc:
+ bucket = movements.setdefault((creditAcc, year, month), {"debit": 0.0, "credit": 0.0})
+ bucket["credit"] += amount
+
+ accountsByNumber = sorted({n for n in accIdToNumber.values() if n})
+ results: List[AccountingPeriodBalance] = []
+
+ for accNo in accountsByNumber:
+ if accountNumbersSet is not None and accNo not in accountNumbersSet:
+ continue
+ isER = _isIncomeStatementAccount(accNo)
+
+ preMinYearBalance = 0.0
+ if not isER:
+ for (a, yr, _mo), m in movements.items():
+ if a == accNo and yr < minYear:
+ preMinYearBalance += m["debit"] - m["credit"]
+
+ cumulativeOpeningOfYear = preMinYearBalance
+ for year in sortedYears:
+ if isER:
+ yearOpening = 0.0
+ else:
+ yearOpening = cumulativeOpeningOfYear
+
+ running = yearOpening
+ yearDebit = 0.0
+ yearCredit = 0.0
+ for month in range(1, 13):
+ opening = running
+ mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0})
+ running = opening + mov["debit"] - mov["credit"]
+ yearDebit += mov["debit"]
+ yearCredit += mov["credit"]
+ results.append(AccountingPeriodBalance(
+ accountNumber=accNo,
+ periodYear=year,
+ periodMonth=month,
+ openingBalance=round(opening, 2),
+ debitTotal=round(mov["debit"], 2),
+ creditTotal=round(mov["credit"], 2),
+ closingBalance=round(running, 2),
+ currency="CHF",
+ asOfDate=_formatLastDayOfMonth(year, month),
+ ))
+
+ results.append(AccountingPeriodBalance(
+ accountNumber=accNo,
+ periodYear=year,
+ periodMonth=0,
+ openingBalance=round(yearOpening, 2),
+ debitTotal=round(yearDebit, 2),
+ creditTotal=round(yearCredit, 2),
+ closingBalance=round(running, 2),
+ currency="CHF",
+ asOfDate=f"{year}-12-31",
+ ))
+
+ cumulativeOpeningOfYear = running
+
+ logger.info("Bexio getAccountBalances: %s rows from %s journal entries (years=%s)", len(results), len(rawEntries), sortedYears)
+ return results
+
+ async def _fetchAllJournalRows(self, config: Dict[str, Any], dateTo: str) -> List[Dict[str, Any]]:
+ """Paginate ``GET /3.0/accounting/journal?to=YYYY-12-31`` and return all rows.
+
+ Bexio caps page size at 2000; we fetch until a short page is returned.
+ Failures abort early (returning whatever rows were collected) -- the
+ caller logs the row count, so partial data is visible.
+ """
+ rows: List[Dict[str, Any]] = []
+ offset = 0
+ url = self._buildUrl(config, "3.0/accounting/journal")
+ async with aiohttp.ClientSession() as session:
+ while True:
+ params = {"to": dateTo, "limit": str(_JOURNAL_PAGE_SIZE), "offset": str(offset)}
+ try:
+ async with session.get(url, headers=self._buildHeaders(config), params=params, timeout=aiohttp.ClientTimeout(total=60)) as resp:
+ if resp.status != 200:
+ body = await resp.text()
+ logger.warning("Bexio /accounting/journal HTTP %s offset=%s: %s", resp.status, offset, body[:200])
+ break
+ page = await resp.json()
+ except Exception as ex:
+ logger.warning("Bexio /accounting/journal request failed offset=%s: %s", offset, ex)
+ break
+ if not isinstance(page, list) or not page:
+ break
+ rows.extend(page)
+ if len(page) < _JOURNAL_PAGE_SIZE:
+ break
+ offset += _JOURNAL_PAGE_SIZE
+ return rows
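How one Bexio journal row feeds two movement buckets (field names per the aggregation loop above; IDs and amounts made up):

    accIdToNumber = {77: "1020", 231: "3200"}
    row = {"date": "2025-03-12", "amount": 250.0,
           "debit_account_id": 77, "credit_account_id": 231}

    movements: dict = {}
    for side, accId in (("debit", row["debit_account_id"]),
                        ("credit", row["credit_account_id"])):
        accNo = accIdToNumber.get(accId)
        if accNo:
            bucket = movements.setdefault((accNo, 2025, 3), {"debit": 0.0, "credit": 0.0})
            bucket[side] += row["amount"]

    assert movements[("1020", 2025, 3)]["debit"] == 250.0
    assert movements[("3200", 2025, 3)]["credit"] == 250.0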
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
index 9e372099..98634127 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
@@ -9,6 +9,7 @@ Base URL: https://service.runmyaccounts.com/api/latest/clients/{clientName}/
"""
import asyncio
+import calendar
import json
import logging
import re
@@ -21,6 +22,7 @@ from ..accountingConnectorBase import (
BaseAccountingConnector,
AccountingBooking,
AccountingChart,
+ AccountingPeriodBalance,
ConnectorConfigField,
SyncResult,
)
@@ -31,6 +33,73 @@ logger = logging.getLogger(__name__)
_DEFAULT_API_BASE_URL = "https://service.runmyaccounts.com/api/latest/clients/"
+def _formatLastDayOfMonth(year: int, month: int) -> str:
+ """Return ``YYYY-MM-DD`` of the last day of a calendar month."""
+ lastDay = calendar.monthrange(year, month)[1]
+ return f"{year:04d}-{month:02d}-{lastDay:02d}"
+
+
+def _isIncomeStatementAccount(accountNumber: str) -> bool:
+ """Decide whether an account is part of the income statement (Erfolgsrechnung).
+
+ Swiss KMU-Kontenrahmen: 1xxx Aktiven, 2xxx Passiven (incl. 28xx
+ Eigenkapital) -> balance sheet; 3xxx..9xxx -> income statement.
+ Used by the RMA connector to choose between the two `/gl/saldo` query
+ variants (with vs. without ``from`` parameter).
+ """
+ a = (accountNumber or "").strip()
+ if not a or not a[0].isdigit():
+ return False
+ return a[0] not in ("1", "2")
+
+
+def _parseSaldoBody(body: str) -> List[tuple]:
+ """Parse the response body of ``GET /gl/saldo`` (JSON or XML).
+
+ Returns a list of ``(accountNumber, saldo)`` tuples. The endpoint
+ delivers ``{"row": [{"column": [accno, label, saldo]}, ...]}`` (JSON) or
+    ``<row><column>accno</column><column>label</column><column>saldo</column></row>...``
+ (XML). Rows that cannot be parsed are silently skipped to keep one bad row
+ from poisoning the whole sync.
+ """
+ if not body or not body.strip():
+ return []
+ rows: List[tuple] = []
+ try:
+ data = json.loads(body)
+ items = data.get("row") if isinstance(data, dict) else data
+ if isinstance(items, dict):
+ items = [items]
+ if isinstance(items, list):
+ for item in items:
+ if not isinstance(item, dict):
+ continue
+ cols = item.get("column") or []
+ if isinstance(cols, list) and len(cols) >= 3:
+ accno = str(cols[0]).strip()
+ try:
+ saldo = float(cols[2])
+ except (TypeError, ValueError):
+ continue
+ if accno:
+ rows.append((accno, saldo))
+ return rows
+ except (json.JSONDecodeError, ValueError):
+ pass
+ rowMatches = re.findall(r"(.*?)
", body, re.DOTALL)
+ for raw in rowMatches:
+ cols = re.findall(r"([^<]*)", raw)
+ if len(cols) >= 3:
+ accno = cols[0].strip()
+ try:
+ saldo = float(cols[2])
+ except (TypeError, ValueError):
+ continue
+ if accno:
+ rows.append((accno, saldo))
+ return rows
+
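Both documented response shapes fed through the parser (sample bodies made up to match the format described above):

    jsonBody = '{"row": [{"column": ["1020", "Bank", "1500.50"]}]}'
    assert _parseSaldoBody(jsonBody) == [("1020", 1500.5)]

    xmlBody = "<row><column>1020</column><column>Bank</column><column>1500.50</column></row>"
    assert _parseSaldoBody(xmlBody) == [("1020", 1500.5)]

    assert _parseSaldoBody("") == []    # empty body -> no rows, no error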
+
class AccountingConnectorRma(BaseAccountingConnector):
def getConnectorType(self) -> str:
@@ -447,6 +516,191 @@ class AccountingConnectorRma(BaseAccountingConnector):
logger.error(f"RMA getJournalEntries error: {e}", exc_info=True)
return []
+ async def getAccountBalances(
+ self,
+ config: Dict[str, Any],
+ years: List[int],
+ accountNumbers: Optional[List[str]] = None,
+ ) -> List[AccountingPeriodBalance]:
+ """Fetch authoritative closing balances per account and period via RMA's
+ ``GET /gl/saldo`` endpoint.
+
+        For each requested year we issue 13 saldo lookups (one per month-end
+        plus one for the prior fiscal year-end as opening reference); each
+        lookup expands to two HTTP calls per wildcard pattern (see
+        ``_fetchSaldoMapForDate``). The endpoint
+ returns the cumulative balance per account at the requested ``to`` date,
+ already including prior-year carry-over and yearend bookings -- which
+ is exactly the value the local journal-line aggregation cannot
+ reconstruct when the import window covers only part of the history.
+
+ ``accno`` is mandatory; we use a digit-length-grouped wildcard
+ (``xxxx`` matches all 4-digit accounts, ``xxxxx`` all 5-digit, etc.)
+ derived from the chart of accounts, so 1-2 calls cover every account
+ per period.
+ """
+ if not years:
+ return []
+
+ accountNumbersSet: Optional[set] = set(accountNumbers) if accountNumbers else None
+ wildcardPatterns = await self._resolveWildcardPatterns(config)
+ if not wildcardPatterns:
+ logger.warning("RMA getAccountBalances: chart of accounts is empty, no wildcards derivable")
+ return []
+
+ results: List[AccountingPeriodBalance] = []
+ sortedYears = sorted({int(y) for y in years if y})
+
+ for year in sortedYears:
+ priorYearEnd = f"{year - 1}-12-31"
+ priorSaldosRaw = await self._fetchSaldoMapForDate(config, wildcardPatterns, priorYearEnd)
+ # ER (income statement) accounts reset to 0 at the start of each
+ # fiscal year -- prior-year YTD must NOT carry forward as opening.
+ priorSaldos = {a: (0.0 if _isIncomeStatementAccount(a) else v) for a, v in priorSaldosRaw.items()}
+
+ runningOpening: Dict[str, float] = dict(priorSaldos)
+ decSaldos: Dict[str, float] = {}
+
+ for month in range(1, 13):
+ lastDay = _formatLastDayOfMonth(year, month)
+ saldos = await self._fetchSaldoMapForDate(config, wildcardPatterns, lastDay)
+
+ accountKeys = set(saldos.keys()) | set(runningOpening.keys())
+ for accno in accountKeys:
+ if accountNumbersSet is not None and accno not in accountNumbersSet:
+ continue
+ closing = saldos.get(accno, runningOpening.get(accno, 0.0))
+ opening = runningOpening.get(accno, 0.0)
+ results.append(AccountingPeriodBalance(
+ accountNumber=accno,
+ periodYear=year,
+ periodMonth=month,
+ openingBalance=round(opening, 2),
+ closingBalance=round(closing, 2),
+ currency="CHF",
+ asOfDate=lastDay,
+ ))
+ runningOpening = {**runningOpening, **saldos}
+ if month == 12:
+ decSaldos = dict(saldos)
+
+ annualKeys = set(decSaldos.keys()) | set(priorSaldos.keys())
+ for accno in annualKeys:
+ if accountNumbersSet is not None and accno not in accountNumbersSet:
+ continue
+ closing = decSaldos.get(accno, priorSaldos.get(accno, 0.0))
+ opening = priorSaldos.get(accno, 0.0)
+ results.append(AccountingPeriodBalance(
+ accountNumber=accno,
+ periodYear=year,
+ periodMonth=0,
+ openingBalance=round(opening, 2),
+ closingBalance=round(closing, 2),
+ currency="CHF",
+ asOfDate=f"{year}-12-31",
+ ))
+
+ logger.info(
+ "RMA getAccountBalances: %s rows for years=%s, wildcards=%s",
+ len(results), sortedYears, wildcardPatterns,
+ )
+ return results
+
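Note: a minimal sketch of the opening/closing chaining that `getAccountBalances` performs per month, with invented numbers; when a snapshot omits an account, its last known saldo carries forward as both opening and closing:

```python
prior_year_end = {"1020": 100.0}           # balance-sheet account opening
monthly = {1: {"1020": 140.0}, 2: {}}      # February snapshot omits 1020

running = dict(prior_year_end)
rows = []
for month in (1, 2):
    saldos = monthly[month]
    for accno in sorted(set(saldos) | set(running)):
        opening = running.get(accno, 0.0)
        closing = saldos.get(accno, opening)   # carry forward when absent
        rows.append((accno, month, opening, closing))
    running.update(saldos)

assert rows == [("1020", 1, 100.0, 140.0), ("1020", 2, 140.0, 140.0)]
```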
+ async def _resolveWildcardPatterns(self, config: Dict[str, Any]) -> List[str]:
+ """Derive `accno` wildcard patterns from the chart of accounts.
+
+        RMA's `/gl/saldo` requires `accno`; using digit-length-grouped
+        wildcards (`xxxx`, `xxxxx`, ...) lets us cover every account with one
+        pattern per digit length instead of one call per account number.
+ """
+ try:
+ charts = await self.getChartOfAccounts(config)
+ except Exception as ex:
+ logger.warning("RMA _resolveWildcardPatterns: getChartOfAccounts failed: %s", ex)
+ return []
+ lengths = set()
+ for c in charts:
+ accno = (c.accountNumber or "").strip()
+ if accno.isdigit():
+ lengths.add(len(accno))
+ return [("x" * n) for n in sorted(lengths)]
+
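Note: the digit-length grouping in `_resolveWildcardPatterns`, reduced to its core (invented chart of accounts):

```python
# Non-numeric account numbers are skipped, each remaining digit
# length produces exactly one wildcard pattern.
accounts = ["1020", "2000", "3400", "10200", "6940a"]  # "6940a" ignored
lengths = sorted({len(a) for a in accounts if a.isdigit()})
patterns = ["x" * n for n in lengths]
assert patterns == ["xxxx", "xxxxx"]
```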
+ async def _fetchSaldoMapForDate(
+ self,
+ config: Dict[str, Any],
+ wildcardPatterns: List[str],
+ toDate: str,
+ ) -> Dict[str, float]:
+ """Call `/gl/saldo` and return ``{accountNumber: cumulativeSaldo}``.
+
+ Per RMA docs ("Warning: Chart of the balance sheet do not need a from
+ date. Charts of the income statement need from and to parameter."),
+ we issue **two** calls per pattern:
+
+ * No ``from`` -> correct cumulative saldo for balance-sheet accounts
+ (1xxx, 2xxx in Swiss KMU-Kontenrahmen).
+ * ``from=YYYY-01-01`` (year of ``toDate``) -> correct YTD result for
+ income-statement accounts (3xxx..9xxx, which reset annually).
+
+ Per account number we keep the value from the appropriate call.
+ Empty / failed responses are logged at DEBUG and skipped to avoid
+ aborting the whole sync.
+ """
+ yearStart = f"{toDate[:4]}-01-01"
+ bsRows: Dict[str, float] = {}
+ erRows: Dict[str, float] = {}
+ for pattern in wildcardPatterns:
+ try:
+ bs = await self._fetchSaldoRows(config, accno=pattern, fromDate=None, toDate=toDate)
+ except Exception as ex:
+ logger.debug("RMA _fetchSaldoMapForDate(BS, pattern=%s, to=%s) failed: %s", pattern, toDate, ex)
+ bs = []
+ try:
+ er = await self._fetchSaldoRows(config, accno=pattern, fromDate=yearStart, toDate=toDate)
+ except Exception as ex:
+ logger.debug("RMA _fetchSaldoMapForDate(ER, pattern=%s, %s..%s) failed: %s", pattern, yearStart, toDate, ex)
+ er = []
+ for accno, saldo in bs:
+ bsRows[accno] = saldo
+ for accno, saldo in er:
+ erRows[accno] = saldo
+
+ merged: Dict[str, float] = {}
+ for accno in set(bsRows) | set(erRows):
+ if _isIncomeStatementAccount(accno):
+ merged[accno] = erRows.get(accno, bsRows.get(accno, 0.0))
+ else:
+ merged[accno] = bsRows.get(accno, erRows.get(accno, 0.0))
+ return merged
+
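Note: the BS/ER merge rule of `_fetchSaldoMapForDate`, reduced to a runnable sketch; `_is_er` is a simplified stand-in for the module's `_isIncomeStatementAccount` helper, and the numbers are invented:

```python
def _is_er(accno: str) -> bool:
    # Simplified stand-in: 3xxx..9xxx are income-statement accounts
    # in the Swiss KMU chart, 1xxx/2xxx are balance-sheet accounts.
    return accno[:1] in "3456789"

bs_rows = {"1020": 1500.0, "3400": 99999.0}   # cumulative, no `from`
er_rows = {"3400": 250.0}                      # YTD, from=year start

merged = {}
for accno in set(bs_rows) | set(er_rows):
    if _is_er(accno):
        merged[accno] = er_rows.get(accno, bs_rows.get(accno, 0.0))
    else:
        merged[accno] = bs_rows.get(accno, er_rows.get(accno, 0.0))

# ER account takes the YTD value, BS account the cumulative value.
assert merged == {"1020": 1500.0, "3400": 250.0}
```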
+ async def _fetchSaldoRows(
+ self,
+ config: Dict[str, Any],
+ accno: str,
+ fromDate: Optional[str],
+ toDate: str,
+ ) -> List[tuple]:
+ """Single `/gl/saldo` call. Returns list of ``(accountNumber, saldo)`` tuples."""
+ url = self._buildUrl(config, "gl/saldo")
+ params: Dict[str, str] = {
+ "accno": accno,
+ "to": toDate,
+ "bookkeeping_main_curr": "true",
+ }
+ if fromDate:
+ params["from"] = fromDate
+ async with aiohttp.ClientSession() as session:
+ async with session.get(
+ url,
+ headers=self._buildHeaders(config),
+ params=params,
+ timeout=aiohttp.ClientTimeout(total=20),
+ ) as resp:
+ if resp.status != 200:
+ body = await resp.text()
+ logger.debug("RMA /gl/saldo accno=%s from=%s to=%s -> HTTP %s: %s", accno, fromDate, toDate, resp.status, body[:200])
+ return []
+ body = await resp.text()
+ return _parseSaldoBody(body)
+
async def _fetchGlBulk(self, config: Dict[str, Any], params: Dict[str, str]) -> List[Dict[str, Any]]:
"""Try GET /gl to fetch journal entries in bulk (not all RMA versions support this)."""
try:
diff --git a/modules/features/trustee/interfaceFeatureTrustee.py b/modules/features/trustee/interfaceFeatureTrustee.py
index b1a6aab6..9f1c911a 100644
--- a/modules/features/trustee/interfaceFeatureTrustee.py
+++ b/modules/features/trustee/interfaceFeatureTrustee.py
@@ -1109,10 +1109,15 @@ class TrusteeObjects:
)
def _cleanDocumentRecords(records):
- return [
- TrusteeDocument(**{k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"})
- for r in records
- ]
+ cleaned = []
+ for r in records:
+ labelCols = {k: v for k, v in r.items() if k.endswith("Label")}
+ filteredFields = {k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"}
+ doc = TrusteeDocument(**filteredFields)
+ d = doc.model_dump()
+ d.update(labelCols)
+ cleaned.append(d)
+ return cleaned
if isinstance(result, PaginatedResult):
result.items = _cleanDocumentRecords(result.items)
@@ -1133,10 +1138,15 @@ class TrusteeObjects:
)
def _cleanDocumentRecords(records):
- return [
- TrusteeDocument(**{k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"})
- for r in records
- ]
+ cleaned = []
+ for r in records:
+ labelCols = {k: v for k, v in r.items() if k.endswith("Label")}
+ filteredFields = {k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"}
+ doc = TrusteeDocument(**filteredFields)
+ d = doc.model_dump()
+ d.update(labelCols)
+ cleaned.append(d)
+ return cleaned
if isinstance(result, PaginatedResult):
result.items = _cleanDocumentRecords(result.items)
@@ -1297,10 +1307,13 @@ class TrusteeObjects:
def _cleanAndValidate(records):
items = []
for record in records:
+ labelCols = {k: v for k, v in record.items() if k.endswith("Label")}
cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_") or k in keepFields}
position = self._toTrusteePositionOrDelete(cleanedRecord, deleteCorrupt=True)
if position is not None:
- items.append(position)
+ d = position.model_dump()
+ d.update(labelCols)
+ items.append(d)
return items
if isinstance(result, PaginatedResult):
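Note: why the `_clean*` helpers above now return plain dicts -- Pydantic models drop undeclared `*Label` columns, so the labels are overlaid after `model_dump()`. A toy sketch (the real `TrusteeDocument` has more fields; passing extra keys relies on Pydantic v2's default `extra='ignore'`):

```python
from pydantic import BaseModel

class Doc(BaseModel):
    id: str
    name: str

record = {"id": "d1", "name": "Invoice", "_internal": 1,
          "documentData": b"...", "organisationIdLabel": "HappyLife AG"}

labelCols = {k: v for k, v in record.items() if k.endswith("Label")}
fields = {k: v for k, v in record.items()
          if not k.startswith("_") and k != "documentData"}

doc = Doc(**fields)   # the extra "...Label" key is ignored by the model
d = doc.model_dump()
d.update(labelCols)   # re-attach the FK display label for the frontend
assert d == {"id": "d1", "name": "Invoice",
             "organisationIdLabel": "HappyLife AG"}
```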
diff --git a/modules/features/trustee/mainTrustee.py b/modules/features/trustee/mainTrustee.py
index 0799fa1c..020aeda5 100644
--- a/modules/features/trustee/mainTrustee.py
+++ b/modules/features/trustee/mainTrustee.py
@@ -394,9 +394,15 @@ TEMPLATE_WORKFLOWS = [
{"id": "extract", "type": "trustee.extractFromFiles", "label": "Dokumente extrahieren", "_method": "trustee", "_action": "extractFromFiles",
"parameters": {"featureInstanceId": "{{featureInstanceId}}", "prompt": ""}, "position": {"x": 250, "y": 0}},
{"id": "process", "type": "trustee.processDocuments", "label": "Verarbeiten", "_method": "trustee", "_action": "processDocuments",
- "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 500, "y": 0}},
+ "parameters": {
+ "documentList": {"type": "ref", "nodeId": "extract", "path": ["documents"]},
+ "featureInstanceId": "{{featureInstanceId}}",
+ }, "position": {"x": 500, "y": 0}},
{"id": "sync", "type": "trustee.syncToAccounting", "label": "Synchronisieren", "_method": "trustee", "_action": "syncToAccounting",
- "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 750, "y": 0}},
+ "parameters": {
+ "documentList": {"type": "ref", "nodeId": "process", "path": ["documents"]},
+ "featureInstanceId": "{{featureInstanceId}}",
+ }, "position": {"x": 750, "y": 0}},
],
"connections": [
{"source": "trigger", "sourcePort": 0, "target": "extract", "targetPort": 0},
diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py
index fbdd0966..021251fc 100644
--- a/modules/features/trustee/routeFeatureTrustee.py
+++ b/modules/features/trustee/routeFeatureTrustee.py
@@ -412,34 +412,41 @@ def get_position_options(
# ===== Organisation Routes =====
-@router.get("/{instanceId}/organisations", response_model=PaginatedResponse[TrusteeOrganisation])
+@router.get("/{instanceId}/organisations")
@limiter.limit("30/minute")
def get_organisations(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteeOrganisation]:
+):
"""Get all organisations for a feature instance with optional pagination."""
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
mandateId = _validateInstanceAccess(instanceId, context)
paginationParams = _parsePagination(pagination)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllOrganisations(paginationParams)
+ def _toDicts(items):
+ return [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeOrganisation)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeOrganisation)
+ return {"items": enriched, "pagination": None}
@router.get("/{instanceId}/organisations/{orgId}", response_model=TrusteeOrganisation)
@@ -525,34 +532,41 @@ def delete_organisation(
# ===== Role Routes =====
-@router.get("/{instanceId}/roles", response_model=PaginatedResponse[TrusteeRole])
+@router.get("/{instanceId}/roles")
@limiter.limit("30/minute")
def get_roles(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteeRole]:
+):
"""Get all roles with optional pagination."""
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
mandateId = _validateInstanceAccess(instanceId, context)
paginationParams = _parsePagination(pagination)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllRoles(paginationParams)
+ def _toDicts(items):
+ return [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeRole)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeRole)
+ return {"items": enriched, "pagination": None}
@router.get("/{instanceId}/roles/{roleId}", response_model=TrusteeRole)
@@ -638,34 +652,41 @@ def delete_role(
# ===== Access Routes =====
-@router.get("/{instanceId}/access", response_model=PaginatedResponse[TrusteeAccess])
+@router.get("/{instanceId}/access")
@limiter.limit("30/minute")
def get_all_access(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteeAccess]:
+):
"""Get all access records with optional pagination."""
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
mandateId = _validateInstanceAccess(instanceId, context)
paginationParams = _parsePagination(pagination)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllAccess(paginationParams)
+ def _toDicts(items):
+ return [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeAccess)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeAccess)
+ return {"items": enriched, "pagination": None}
@router.get("/{instanceId}/access/{accessId}", response_model=TrusteeAccess)
@@ -781,34 +802,41 @@ def delete_access(
# ===== Contract Routes =====
-@router.get("/{instanceId}/contracts", response_model=PaginatedResponse[TrusteeContract])
+@router.get("/{instanceId}/contracts")
@limiter.limit("30/minute")
def get_contracts(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteeContract]:
+):
"""Get all contracts with optional pagination."""
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
mandateId = _validateInstanceAccess(instanceId, context)
paginationParams = _parsePagination(pagination)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllContracts(paginationParams)
+ def _toDicts(items):
+ return [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeContract)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeContract)
+ return {"items": enriched, "pagination": None}
@router.get("/{instanceId}/contracts/{contractId}", response_model=TrusteeContract)
@@ -909,7 +937,7 @@ def delete_contract(
# ===== Document Routes =====
-@router.get("/{instanceId}/documents", response_model=PaginatedResponse[TrusteeDocument])
+@router.get("/{instanceId}/documents")
@limiter.limit("30/minute")
def get_documents(
request: Request,
@@ -918,7 +946,7 @@ def get_documents(
mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteeDocument]:
+):
"""Get all documents (metadata only) with optional pagination."""
mandateId = _validateInstanceAccess(instanceId, context)
@@ -929,19 +957,23 @@ def get_documents(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllDocuments(paginationParams)
+ def _itemsToDicts(items):
+ return [r.model_dump() if hasattr(r, 'model_dump') else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ return {
+ "items": _itemsToDicts(result.items),
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ return {"items": _itemsToDicts(items), "pagination": None}
def _handleDocumentMode(instanceId, mandateId, mode, column, pagination, context):
@@ -1154,7 +1186,7 @@ def delete_document(
# ===== Position Routes =====
-@router.get("/{instanceId}/positions", response_model=PaginatedResponse[TrusteePosition])
+@router.get("/{instanceId}/positions")
@limiter.limit("30/minute")
def get_positions(
request: Request,
@@ -1163,7 +1195,7 @@ def get_positions(
mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[TrusteePosition]:
+):
"""Get all positions with optional pagination."""
mandateId = _validateInstanceAccess(instanceId, context)
@@ -1174,19 +1206,23 @@ def get_positions(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.getAllPositions(paginationParams)
+ def _itemsToDicts(items):
+ return [r.model_dump() if hasattr(r, 'model_dump') else r for r in items]
+
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ return {
+ "items": _itemsToDicts(result.items),
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None
- )
- )
- return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None)
+ ).model_dump(),
+ }
+ items = result if isinstance(result, list) else result.items
+ return {"items": _itemsToDicts(items), "pagination": None}
def _handlePositionMode(instanceId, mandateId, mode, column, pagination, context):
@@ -1347,8 +1383,8 @@ def get_available_accounting_connectors(
) -> List[Dict[str, Any]]:
"""List all available accounting system connectors with their config fields."""
_validateInstanceAccess(instanceId, context)
- from .accounting.accountingRegistry import _getAccountingRegistry
- return _getAccountingRegistry().getAvailableConnectors()
+ from .accounting.accountingRegistry import getAccountingRegistry
+ return getAccountingRegistry().getAvailableConnectors()
# Placeholder returned for secret config fields so frontend can prefill form without sending real secrets.
@@ -1357,8 +1393,8 @@ _CONFIG_PLACEHOLDER = "***"
def _getConfigMasked(connectorType: str, plainConfig: Dict[str, Any]) -> Dict[str, str]:
"""Build config with secret values replaced by placeholder for GET response."""
- from .accounting.accountingRegistry import _getAccountingRegistry
- connector = _getAccountingRegistry().getConnector(connectorType)
+ from .accounting.accountingRegistry import getAccountingRegistry
+ connector = getAccountingRegistry().getConnector(connectorType)
if not connector:
return {k: (v if isinstance(v, str) else str(v)) for k, v in (plainConfig or {}).items()}
secretKeys = {f.key for f in connector.getRequiredConfigFields() if f.secret}
@@ -2081,13 +2117,13 @@ def _serializeRoleForApi(role) -> Dict[str, Any]:
return payload
-@router.get("/{instanceId}/instance-roles", response_model=PaginatedResponse)
+@router.get("/{instanceId}/instance-roles")
@limiter.limit("30/minute")
def get_instance_roles(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse:
+):
"""
Get all roles for this feature instance.
Requires feature admin permission.
@@ -2095,14 +2131,9 @@ def get_instance_roles(
mandateId = _validateInstanceAdmin(instanceId, context)
rootInterface = getRootInterface()
-
- # Get instance-specific roles (Pydantic models)
roles = rootInterface.getRolesByFeatureCode("trustee", featureInstanceId=instanceId)
- return PaginatedResponse(
- items=[_serializeRoleForApi(r) for r in roles],
- pagination=None
- )
+ return {"items": [_serializeRoleForApi(r) for r in roles], "pagination": None}
@router.get("/{instanceId}/instance-roles/{roleId}", response_model=Dict[str, Any])
@@ -2129,14 +2160,14 @@ def get_instance_role(
return _serializeRoleForApi(role)
-@router.get("/{instanceId}/instance-roles/{roleId}/rules", response_model=PaginatedResponse)
+@router.get("/{instanceId}/instance-roles/{roleId}/rules")
@limiter.limit("30/minute")
def get_instance_role_rules(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
roleId: str = Path(..., description="Role ID"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse:
+):
"""
Get all AccessRules for a specific instance role.
Requires feature admin permission.
@@ -2145,18 +2176,13 @@ def get_instance_role_rules(
rootInterface = getRootInterface()
- # Verify role belongs to this instance (Pydantic model)
role = rootInterface.getRole(roleId)
if not role or str(role.featureInstanceId) != instanceId:
raise HTTPException(status_code=404, detail=f"Role {roleId} not found in this instance")
- # Get AccessRules for this role (Pydantic models)
rules = rootInterface.getAccessRulesByRole(roleId)
- return PaginatedResponse(
- items=[r.model_dump() for r in rules],
- pagination=None
- )
+ return {"items": [r.model_dump() for r in rules], "pagination": None}
@router.post("/{instanceId}/instance-roles/{roleId}/rules", response_model=Dict[str, Any], status_code=201)
@@ -2336,6 +2362,7 @@ def _paginatedReadEndpoint(
handleFilterValuesInMemory,
handleIdsInMemory,
parseCrossFilterPagination,
+ enrichRowsWithFkLabels,
)
from fastapi.responses import JSONResponse
@@ -2401,23 +2428,28 @@ def _paginatedReadEndpoint(
featureCode=interface.FEATURE_CODE,
)
+ def _itemsToDicts(rawItems):
+ return [r.model_dump() if hasattr(r, "model_dump") else r for r in rawItems]
+
if paginationParams and hasattr(result, "items"):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_itemsToDicts(result.items), modelClass)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None,
- ),
- )
+ ).model_dump(),
+ }
items = result.items if hasattr(result, "items") else result
- return PaginatedResponse(items=items, pagination=None)
+ enriched = enrichRowsWithFkLabels(_itemsToDicts(items), modelClass)
+ return {"items": enriched, "pagination": None}
-@router.get("/{instanceId}/data/accounts", response_model=PaginatedResponse[TrusteeDataAccount])
+@router.get("/{instanceId}/data/accounts")
@limiter.limit("30/minute")
def get_data_accounts(
request: Request,
@@ -2438,7 +2470,7 @@ def get_data_accounts(
)
-@router.get("/{instanceId}/data/journal-entries", response_model=PaginatedResponse[TrusteeDataJournalEntry])
+@router.get("/{instanceId}/data/journal-entries")
@limiter.limit("30/minute")
def get_data_journal_entries(
request: Request,
@@ -2459,7 +2491,7 @@ def get_data_journal_entries(
)
-@router.get("/{instanceId}/data/journal-lines", response_model=PaginatedResponse[TrusteeDataJournalLine])
+@router.get("/{instanceId}/data/journal-lines")
@limiter.limit("30/minute")
def get_data_journal_lines(
request: Request,
@@ -2480,7 +2512,7 @@ def get_data_journal_lines(
)
-@router.get("/{instanceId}/data/contacts", response_model=PaginatedResponse[TrusteeDataContact])
+@router.get("/{instanceId}/data/contacts")
@limiter.limit("30/minute")
def get_data_contacts(
request: Request,
@@ -2501,7 +2533,7 @@ def get_data_contacts(
)
-@router.get("/{instanceId}/data/account-balances", response_model=PaginatedResponse[TrusteeDataAccountBalance])
+@router.get("/{instanceId}/data/account-balances")
@limiter.limit("30/minute")
def get_data_account_balances(
request: Request,
@@ -2522,7 +2554,7 @@ def get_data_account_balances(
)
-@router.get("/{instanceId}/accounting/configs", response_model=PaginatedResponse[TrusteeAccountingConfig])
+@router.get("/{instanceId}/accounting/configs")
@limiter.limit("30/minute")
def get_accounting_configs(
request: Request,
@@ -2548,7 +2580,7 @@ def get_accounting_configs(
)
-@router.get("/{instanceId}/accounting/syncs", response_model=PaginatedResponse[TrusteeAccountingSync])
+@router.get("/{instanceId}/accounting/syncs")
@limiter.limit("30/minute")
def get_accounting_syncs(
request: Request,
diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py
index 1c44d54d..96313293 100644
--- a/modules/features/workspace/routeFeatureWorkspace.py
+++ b/modules/features/workspace/routeFeatureWorkspace.py
@@ -191,7 +191,7 @@ _SOURCE_TYPE_TO_SERVICE = {
}
-def _buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str:
+def buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str:
"""Build a description of active data sources for the agent prompt."""
parts = [
"The user has attached the following external data sources to this prompt.",
@@ -229,7 +229,7 @@ def _buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str:
return "\n".join(parts) if found else ""
-def _buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str:
+def buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str:
"""Build a description of attached feature data sources for the agent prompt."""
from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
from modules.security.rbacCatalog import getCatalogService
@@ -735,12 +735,12 @@ async def _runWorkspaceAgent(
enrichedPrompt = prompt
if dataSourceIds:
- dsInfo = _buildDataSourceContext(chatService, dataSourceIds)
+ dsInfo = buildDataSourceContext(chatService, dataSourceIds)
if dsInfo:
enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}"
if featureDataSourceIds:
- fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds)
+ fdsInfo = buildFeatureDataSourceContext(featureDataSourceIds)
if fdsInfo:
enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}"
diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py
index 3e8bf4ea..a6ae0052 100644
--- a/modules/interfaces/interfaceBootstrap.py
+++ b/modules/interfaces/interfaceBootstrap.py
@@ -139,7 +139,7 @@ def _bootstrapBilling() -> None:
Idempotent: only creates missing settings/accounts.
"""
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface
billingInterface = getBillingRootInterface()
@@ -1968,11 +1968,11 @@ def initRootMandateBilling(mandateId: str) -> None:
Creates mandate pool account and user audit accounts.
"""
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface
from modules.interfaces.interfaceDbApp import getRootInterface as getAppRootInterface
from modules.datamodels.datamodelBilling import BillingSettings
- billingInterface = _getRootInterface()
+ billingInterface = getBillingRootInterface()
appInterface = getAppRootInterface()
existingSettings = billingInterface.getSettings(mandateId)
@@ -2012,7 +2012,7 @@ def _initRootMandateSubscription(mandateId: str) -> None:
Called during bootstrap after billing init.
"""
try:
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface
from modules.datamodels.datamodelSubscription import (
MandateSubscription,
SubscriptionStatusEnum,
diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py
index c754684f..d1593473 100644
--- a/modules/interfaces/interfaceDbApp.py
+++ b/modules/interfaces/interfaceDbApp.py
@@ -15,7 +15,7 @@ from typing import Dict, Any, List, Optional, Union
from passlib.context import CryptContext
import uuid
-from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector
+from modules.connectors.connectorDbPostgre import DatabaseConnector, getCachedConnector
from modules.shared.configuration import APP_CONFIG
from modules.shared.dbRegistry import registerDatabase
from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp
@@ -143,7 +143,7 @@ class AppObjects:
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
- self.db = _get_cached_connector(
+ self.db = getCachedConnector(
dbHost=dbHost,
dbDatabase=dbDatabase,
dbUser=dbUser,
@@ -1594,8 +1594,8 @@ class AppObjects:
if not adminRoleId:
raise ValueError(f"No admin role found for mandate {mandateId} — cannot assign user without role")
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
- from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRoot
from datetime import datetime, timezone, timedelta
now = datetime.now(timezone.utc)
@@ -1693,7 +1693,7 @@ class AppObjects:
from modules.datamodels.datamodelSubscription import (
SubscriptionStatusEnum, BUILTIN_PLANS,
)
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
from datetime import datetime, timezone, timedelta
activated = 0
@@ -1936,7 +1936,7 @@ class AppObjects:
logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}")
# 3. Cancel Stripe subscriptions + delete MandateSubscription records (poweron_billing)
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
subInterface = _getSubRoot()
subs = subInterface.listForMandate(mandateId)
for sub in subs:
@@ -1954,7 +1954,7 @@ class AppObjects:
logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}")
# 3b. Delete Billing data (poweron_billing)
- from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot
+ from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRoot
billingDb = _getBillingRoot().db
billingAccounts = billingDb.getRecordset(BillingAccount, recordFilter={"mandateId": mandateId})
for acc in billingAccounts:
@@ -2202,7 +2202,7 @@ class AppObjects:
Balance is always on the mandate pool (PREPAY_MANDATE). User accounts are for audit trail only.
"""
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface
billingInterface = getBillingRootInterface()
settings = billingInterface.getSettings(mandateId)
diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py
index a4af7b25..db1ee619 100644
--- a/modules/interfaces/interfaceDbBilling.py
+++ b/modules/interfaces/interfaceDbBilling.py
@@ -134,7 +134,7 @@ def getInterface(currentUser: User, mandateId: str = None) -> "BillingObjects":
return _billingInterfaces[cacheKey]
-def _getRootInterface() -> "BillingObjects":
+def getRootInterface() -> "BillingObjects":
"""Get interface with system access for bootstrap operations."""
from modules.security.rootAccess import getRootUser
rootUser = getRootUser()
@@ -888,7 +888,7 @@ class BillingObjects:
prev = self._parseSettingsDateTime(settings.get("storagePeriodStartAt"))
if prev is not None and abs((prev - periodStartAt).total_seconds()) < 2:
return
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
usedMB = float(_getSubRoot().getMandateDataVolumeMB(mandateId))
self.updateSettings(
@@ -911,13 +911,13 @@ class BillingObjects:
settings = self.getSettings(mandateId)
if not settings:
return None
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
- from modules.datamodels.datamodelSubscription import _getPlan
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
+ from modules.datamodels.datamodelSubscription import getPlan
subIface = _getSubRoot()
usedMB = float(subIface.getMandateDataVolumeMB(mandateId))
sub = subIface.getOperativeForMandate(mandateId)
- plan = _getPlan(sub.get("planKey", "")) if sub else None
+ plan = getPlan(sub.get("planKey", "")) if sub else None
includedMB = plan.maxDataVolumeMB if plan and plan.maxDataVolumeMB is not None else None
if includedMB is None:
return None
@@ -971,13 +971,13 @@ class BillingObjects:
Amount = budgetAiPerUserCHF * activeUsers (dynamic, not the static plan.budgetAiCHF).
Should be called once per billing period (initial activation + each invoice.paid).
Returns the created CREDIT transaction or None if budget is 0."""
- from modules.datamodels.datamodelSubscription import _getPlan
+ from modules.datamodels.datamodelSubscription import getPlan
- plan = _getPlan(planKey)
+ plan = getPlan(planKey)
if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0:
return None
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
subRoot = _getSubRoot()
activeUsers = max(subRoot.countActiveUsers(mandateId), 1)
amount = plan.budgetAiPerUserCHF * activeUsers
@@ -1027,13 +1027,13 @@ class BillingObjects:
delta > 0: user added -> CREDIT pro-rata portion
delta < 0: user removed -> DEBIT pro-rata portion
"""
- from modules.datamodels.datamodelSubscription import _getPlan
+ from modules.datamodels.datamodelSubscription import getPlan
- plan = _getPlan(planKey)
+ plan = getPlan(planKey)
if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0:
return None
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
subRoot = _getSubRoot()
operative = subRoot.getOperativeForMandate(mandateId)
if not operative:
@@ -1221,7 +1221,7 @@ class BillingObjects:
if not mandate or not getattr(mandate, "enabled", True):
continue
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})"
settings = self.getSettings(mandateId)
if not settings:
@@ -1280,13 +1280,12 @@ class BillingObjects:
if not userAccount:
continue
- # Get transactions for user's account (all transactions are on user accounts now)
transactions = self.getTransactions(userAccount["id"], limit=limit)
mandate = appInterface.getMandate(mandateId)
- mandateName = ""
+ mandateName = f"NA({mandateId})"
if mandate:
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})"
for t in transactions:
t["mandateId"] = mandateId
@@ -1333,9 +1332,9 @@ class BillingObjects:
continue
mandate = appInterface.getMandate(mandateId)
- mandateName = ""
+ mandateName = f"NA({mandateId})"
if mandate:
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})"
allMandateAccounts = self.db.getRecordset(
BillingAccount,
@@ -1387,11 +1386,10 @@ class BillingObjects:
for mandateId in targetMandateIds:
transactions = self.getTransactionsByMandate(mandateId, limit=limit)
- # Get mandate name
mandate = appInterface.getMandate(mandateId)
- mandateName = ""
+ mandateName = f"NA({mandateId})"
if mandate:
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})"
for t in transactions:
t["mandateId"] = mandateId
@@ -1439,7 +1437,6 @@ class BillingObjects:
for s in allSettings:
settingsMap[s.get("mandateId")] = s
- # Get user info efficiently
userIds = list(set(acc.get("userId") for acc in allAccounts if acc.get("userId")))
userMap = {}
for userId in userIds:
@@ -1447,16 +1444,15 @@ class BillingObjects:
if user:
displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None)
username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None)
- userMap[userId] = displayName or username or userId
+ userMap[userId] = displayName or username or f"NA({userId})"
- # Get mandate info efficiently
mandateMap = {}
mandateIdList = list(set(acc.get("mandateId") for acc in allAccounts if acc.get("mandateId")))
for mandateId in mandateIdList:
mandate = appInterface.getMandate(mandateId)
if mandate:
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
- mandateMap[mandateId] = mandateName
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None)
+ mandateMap[mandateId] = mandateName or f"NA({mandateId})"
for account in allAccounts:
mandateId = account.get("mandateId")
@@ -1475,9 +1471,9 @@ class BillingObjects:
balances.append({
"accountId": account.get("id"),
"mandateId": mandateId,
- "mandateName": mandateMap.get(mandateId, ""),
+ "mandateName": mandateMap.get(mandateId) or (f"NA({mandateId})" if mandateId else None),
"userId": userId,
- "userName": userMap.get(userId, userId),
+ "userName": userMap.get(userId) or (f"NA({userId})" if userId else None),
"balance": balance,
"warningThreshold": warningThreshold,
"isWarning": balance <= warningThreshold,
@@ -1596,14 +1592,14 @@ class BillingObjects:
if pageUserIds:
users = appInterface.getUsersByIds(list(pageUserIds))
for uid, u in users.items():
- dn = getattr(u, "displayName", None) or getattr(u, "username", None) or uid
+ dn = getattr(u, "displayName", None) or getattr(u, "username", None) or f"NA({uid})"
userMap[uid] = dn
mandateMap: Dict[str, str] = {}
if pageMandateIds:
mandates = appInterface.getMandatesByIds(list(pageMandateIds))
for mid, m in mandates.items():
- mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or mid
+ mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or f"NA({mid})"
enriched = []
for t in pageItems:
@@ -1613,9 +1609,9 @@ class BillingObjects:
mid = acc.get("mandateId")
txUserId = row.get("createdByUserId") or acc.get("userId")
row["mandateId"] = mid
- row["mandateName"] = mandateMap.get(mid, "")
+ row["mandateName"] = mandateMap.get(mid) or (f"NA({mid})" if mid else None)
row["userId"] = txUserId
- row["userName"] = userMap.get(txUserId, txUserId) if txUserId else None
+ row["userName"] = userMap.get(txUserId) or (f"NA({txUserId})" if txUserId else None)
enriched.append(row)
return PaginatedResult(items=enriched, totalItems=totalItems, totalPages=totalPages)
@@ -1639,12 +1635,12 @@ class BillingObjects:
first, then builds a single SQL query with OR-combined conditions.
"""
import math
- from modules.connectors.connectorDbPostgre import _get_model_fields, _parseRecordFields
+ from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields
from modules.datamodels.datamodelUam import UserInDB
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
table = BillingTransaction.__name__
- fields = _get_model_fields(BillingTransaction)
+ fields = getModelFields(BillingTransaction)
pattern = f"%{searchTerm}%"
# Resolve matching user / mandate IDs via the app DB (which is separate
@@ -1785,7 +1781,7 @@ class BillingObjects:
records = [dict(row) for row in cur.fetchall()]
for rec in records:
- _parseRecordFields(rec, fields, f"search table {table}")
+ parseRecordFields(rec, fields, f"search table {table}")
totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
return {"items": records, "totalItems": totalItems, "totalPages": totalPages}
@@ -2023,7 +2019,7 @@ class BillingObjects:
appInterface = getAppInterface(self.currentUser)
mandates = appInterface.getMandatesByIds(mandateIds)
return sorted(
- {getattr(m, "label", None) or getattr(m, "name", "") or mid for mid, m in mandates.items()},
+ {getattr(m, "label", None) or getattr(m, "name", None) or f"NA({mid})" for mid, m in mandates.items()},
key=lambda v: v.lower(),
)
@@ -2035,7 +2031,7 @@ class BillingObjects:
appInterface = getAppInterface(self.currentUser)
users = appInterface.getUsersByIds(values)
return sorted(
- {getattr(u, "displayName", None) or getattr(u, "username", None) or uid for uid, u in users.items()},
+ {getattr(u, "displayName", None) or getattr(u, "username", None) or f"NA({uid})" for uid, u in users.items()},
key=lambda v: v.lower(),
)
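Note: the `... or f"NA({id})"` fallback chain repeated throughout this file could be read as one helper; a hypothetical consolidation, not part of the patch:

```python
def display_name(obj, objId, attrs=("label", "name")):
    # Prefer label/name attributes, then dict keys, then a visible
    # "NA(<id>)" marker instead of silently rendering "".
    for a in attrs:
        val = getattr(obj, a, None)
        if val:
            return val
        if isinstance(obj, dict) and obj.get(a):
            return obj[a]
    return f"NA({objId})"

assert display_name(None, "m-1") == "NA(m-1)"
assert display_name({"label": "HappyLife AG"}, "m-1") == "HappyLife AG"
```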
@@ -2075,7 +2071,6 @@ class BillingObjects:
"userId": acc.get("userId")
}
- # Get user info efficiently
userIds = list(set(acc.get("userId") for acc in allAccounts if acc.get("userId")))
userMap = {}
for userId in userIds:
@@ -2083,16 +2078,15 @@ class BillingObjects:
if user:
displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None)
username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None)
- userMap[userId] = displayName or username or userId
+ userMap[userId] = displayName or username or f"NA({userId})"
- # Get mandate info efficiently
mandateMap = {}
mandateIdList = list(set(acc.get("mandateId") for acc in allAccounts if acc.get("mandateId")))
for mandateId in mandateIdList:
mandate = appInterface.getMandate(mandateId)
if mandate:
- mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "")
- mandateMap[mandateId] = mandateName
+ mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None)
+ mandateMap[mandateId] = mandateName or f"NA({mandateId})"
# Get transactions for all accounts and collect createdByUserIds
rawTransactions = []
@@ -2123,18 +2117,16 @@ class BillingObjects:
if user:
displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None)
username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None)
- userMap[uid] = displayName or username or uid
+ userMap[uid] = displayName or username or f"NA({uid})"
- # Enrich transactions
for t in rawTransactions:
mandateId = t.pop("_accountMandateId", None)
accountUserId = t.pop("_accountUserId", None)
t["mandateId"] = mandateId
- t["mandateName"] = mandateMap.get(mandateId, "")
- # Prefer createdByUserId (per-transaction) over account-derived userId
+ t["mandateName"] = mandateMap.get(mandateId) or (f"NA({mandateId})" if mandateId else None)
txUserId = t.get("createdByUserId") or accountUserId
t["userId"] = txUserId
- t["userName"] = userMap.get(txUserId, txUserId) if txUserId else None
+ t["userName"] = userMap.get(txUserId) or (f"NA({txUserId})" if txUserId else None)
allTransactions.append(t)
except Exception as e:
diff --git a/modules/interfaces/interfaceDbChat.py b/modules/interfaces/interfaceDbChat.py
index be097263..1b7ec59a 100644
--- a/modules/interfaces/interfaceDbChat.py
+++ b/modules/interfaces/interfaceDbChat.py
@@ -62,13 +62,13 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI
try:
import os
from datetime import datetime, UTC
- from modules.shared.debugLogger import _getBaseDebugDir, _ensureDir
+ from modules.shared.debugLogger import getBaseDebugDir, ensureDir
from modules.interfaces.interfaceDbManagement import getInterface
# Create base debug directory (use base debug dir, not prompts subdirectory)
- baseDebugDir = _getBaseDebugDir()
+ baseDebugDir = getBaseDebugDir()
debug_root = os.path.join(baseDebugDir, 'messages')
- _ensureDir(debug_root)
+ ensureDir(debug_root)
# Generate timestamp
timestamp = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
@@ -133,7 +133,7 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI
safe_label = "default"
label_folder = os.path.join(message_path, safe_label)
- _ensureDir(label_folder)
+ ensureDir(label_folder)
# Store each document
for i, doc in enumerate(docs):
diff --git a/modules/interfaces/interfaceDbKnowledge.py b/modules/interfaces/interfaceDbKnowledge.py
index a12ac048..f819615e 100644
--- a/modules/interfaces/interfaceDbKnowledge.py
+++ b/modules/interfaces/interfaceDbKnowledge.py
@@ -11,7 +11,7 @@ from collections import defaultdict
from datetime import datetime, timezone, timedelta
from typing import Dict, Any, List, Optional
-from modules.connectors.connectorDbPostgre import _get_cached_connector
+from modules.connectors.connectorDbPostgre import getCachedConnector
from modules.shared.dbRegistry import registerDatabase
from modules.datamodels.datamodelKnowledge import FileContentIndex, ContentChunk, RoundMemory, WorkflowMemory
from modules.datamodels.datamodelUam import User
@@ -43,7 +43,7 @@ class KnowledgeObjects:
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
- self.db = _get_cached_connector(
+ self.db = getCachedConnector(
dbHost=dbHost,
dbDatabase=dbDatabase,
dbUser=dbUser,
@@ -103,9 +103,9 @@ class KnowledgeObjects:
ok = self.db.recordDelete(FileContentIndex, fileId)
if ok and mandateId:
try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface
+ from modules.interfaces.interfaceDbBilling import getRootInterface
- _getRootInterface().reconcileMandateStorageBilling(str(mandateId))
+ getRootInterface().reconcileMandateStorageBilling(str(mandateId))
except Exception as ex:
logger.warning("reconcileMandateStorageBilling after delete failed: %s", ex)
return ok
diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py
index cca98ffa..e6cee0b8 100644
--- a/modules/interfaces/interfaceDbManagement.py
+++ b/modules/interfaces/interfaceDbManagement.py
@@ -13,7 +13,7 @@ import math
import mimetypes
from typing import Dict, Any, List, Optional, Union
-from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector
+from modules.connectors.connectorDbPostgre import DatabaseConnector, getCachedConnector
from modules.shared.dbRegistry import registerDatabase
from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, getRecordsetPaginatedWithRBAC
from modules.security.rbac import RbacClass
@@ -136,7 +136,7 @@ class ComponentObjects:
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
- self.db = _get_cached_connector(
+ self.db = getCachedConnector(
dbHost=dbHost,
dbDatabase=dbDatabase,
dbUser=dbUser,
@@ -992,8 +992,11 @@ class ComponentObjects:
if file.get("neutralize") is None:
file["neutralize"] = False
+ labelCols = {k: v for k, v in file.items() if k.endswith("Label")}
fileItem = FileItem(**file)
- fileItems.append(fileItem)
+ itemDict = fileItem.model_dump()
+ itemDict.update(labelCols)
+ fileItems.append(itemDict)
except Exception as e:
logger.warning(f"Skipping invalid file record: {str(e)}")
continue
@@ -1347,8 +1350,8 @@ class ComponentObjects:
folderIds = [f["id"] for f in folders if f.get("id")]
fileCounts: Dict[str, int] = {}
try:
- from modules.interfaces.interfaceRbac import _buildFilesScopeWhereClause
- scopeClause = _buildFilesScopeWhereClause(
+ from modules.interfaces.interfaceRbac import buildFilesScopeWhereClause
+ scopeClause = buildFilesScopeWhereClause(
self.currentUser, "FileItem", self.db,
self.mandateId, self.featureInstanceId,
[], [],
diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py
index 05d83a58..a09fe93f 100644
--- a/modules/interfaces/interfaceDbSubscription.py
+++ b/modules/interfaces/interfaceDbSubscription.py
@@ -25,7 +25,7 @@ from modules.datamodels.datamodelSubscription import (
TERMINAL_STATUSES,
OPERATIVE_STATUSES,
BUILTIN_PLANS,
- _getPlan,
+ getPlan as getPlanFromCatalog,
_getSelectablePlans,
)
@@ -55,7 +55,7 @@ def getInterface(currentUser: User, mandateId: str = None) -> "SubscriptionObjec
return _subscriptionInterfaces[cacheKey]
-def _getRootInterface() -> "SubscriptionObjects":
+def getRootInterface() -> "SubscriptionObjects":
from modules.security.rootAccess import getRootUser
return SubscriptionObjects(getRootUser(), mandateId=None)
@@ -96,7 +96,7 @@ class SubscriptionObjects:
# =========================================================================
def getPlan(self, planKey: str) -> Optional[SubscriptionPlan]:
- return _getPlan(planKey)
+ return getPlanFromCatalog(planKey)
def getSelectablePlans(self) -> List[SubscriptionPlan]:
return _getSelectablePlans()
diff --git a/modules/interfaces/interfaceRbac.py b/modules/interfaces/interfaceRbac.py
index 14953ef1..13bdfcba 100644
--- a/modules/interfaces/interfaceRbac.py
+++ b/modules/interfaces/interfaceRbac.py
@@ -247,8 +247,8 @@ def getRecordsetWithRBAC(
# Handle JSONB fields and ensure numeric types are correct
# Import the helper function from connector module
- from modules.connectors.connectorDbPostgre import _get_model_fields
- fields = _get_model_fields(modelClass)
+ from modules.connectors.connectorDbPostgre import getModelFields
+ fields = getModelFields(modelClass)
for record in records:
for fieldName, fieldType in fields.items():
# Ensure numeric fields are properly typed
@@ -379,8 +379,8 @@ def getRecordsetPaginatedWithRBAC(
whereValues.append(value)
if pagination and pagination.filters:
- from modules.connectors.connectorDbPostgre import _get_model_fields
- fields = _get_model_fields(modelClass)
+ from modules.connectors.connectorDbPostgre import getModelFields
+ fields = getModelFields(modelClass)
validColumns = set(fields.keys())
for key, val in pagination.filters.items():
if key == "search" and isinstance(val, str) and val.strip():
@@ -440,8 +440,8 @@ def getRecordsetPaginatedWithRBAC(
orderParts: List[str] = []
if pagination and pagination.sort:
- from modules.connectors.connectorDbPostgre import _get_model_fields
- validColumns = set(_get_model_fields(modelClass).keys())
+ from modules.connectors.connectorDbPostgre import getModelFields
+ validColumns = set(getModelFields(modelClass).keys())
for sf in pagination.sort:
if sf.field in validColumns:
direction = "DESC" if sf.direction.lower() == "desc" else "ASC"
@@ -464,10 +464,10 @@ def getRecordsetPaginatedWithRBAC(
cursor.execute(dataSql, whereValues)
records = [dict(row) for row in cursor.fetchall()]
- from modules.connectors.connectorDbPostgre import _get_model_fields, _parseRecordFields
- fields = _get_model_fields(modelClass)
+ from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields
+ fields = getModelFields(modelClass)
for record in records:
- _parseRecordFields(record, fields, f"table {table}")
+ parseRecordFields(record, fields, f"table {table}")
for fieldName, fieldType in fields.items():
if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
modelFields = modelClass.model_fields
@@ -484,12 +484,15 @@ def getRecordsetPaginatedWithRBAC(
if enrichPermissions:
records = _enrichRecordsWithPermissions(records, permissions, currentUser)
- if pagination:
- pageSize = pagination.pageSize
- totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
- return PaginatedResult(items=records, totalItems=totalItems, totalPages=totalPages)
+ from modules.routes.routeHelpers import enrichRowsWithFkLabels
+ enrichRowsWithFkLabels(records, modelClass)
- return records
+ if pagination:
+ pageSize = pagination.pageSize
+ totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
+ return PaginatedResult(items=records, totalItems=totalItems, totalPages=totalPages)
+
+ return records
except Exception as e:
logger.error(f"Error in getRecordsetPaginatedWithRBAC for table {table}: {e}")
return PaginatedResult(items=[], totalItems=0, totalPages=0) if pagination else []
@@ -518,8 +521,8 @@ def getDistinctColumnValuesWithRBAC(
if not connector._ensureTableExists(modelClass):
return []
- from modules.connectors.connectorDbPostgre import _get_model_fields
- fields = _get_model_fields(modelClass)
+ from modules.connectors.connectorDbPostgre import getModelFields
+ fields = getModelFields(modelClass)
if column not in fields:
return []
@@ -614,21 +617,34 @@ def getDistinctColumnValuesWithRBAC(
whereClause = " WHERE " + " AND ".join(whereConditions) if whereConditions else ""
notNullCond = f'"{column}" IS NOT NULL AND "{column}"::TEXT != \'\''
if whereClause:
- whereClause += f" AND {notNullCond}"
+ nonNullWhere = whereClause + f" AND {notNullCond}"
else:
- whereClause = f" WHERE {notNullCond}"
+ nonNullWhere = f" WHERE {notNullCond}"
- sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{whereClause} ORDER BY val'
+ sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{nonNullWhere} ORDER BY val'
with connector.connection.cursor() as cursor:
cursor.execute(sql, whereValues)
- return [row["val"] for row in cursor.fetchall()]
+ result = [row["val"] for row in cursor.fetchall()]
+
+ # Include a None entry when NULL/empty rows exist (enables "(Leer)" filter)
+ emptyCond = f'("{column}" IS NULL OR "{column}"::TEXT = \'\')'
+ if whereClause:
+ emptySql = f'SELECT 1 FROM "{table}"{whereClause} AND {emptyCond} LIMIT 1'
+ else:
+ emptySql = f'SELECT 1 FROM "{table}" WHERE {emptyCond} LIMIT 1'
+ with connector.connection.cursor() as cursor:
+ cursor.execute(emptySql, whereValues)
+ if cursor.fetchone():
+ result.append(None)
+
+ return result
except Exception as e:
logger.error(f"Error in getDistinctColumnValuesWithRBAC for {table}.{column}: {e}")
return []
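Note: a sketch of the two statements the helper above now issues (identifiers are illustrative; the real code interpolates validated table/column names and reuses `whereValues` for both queries):

```python
# Distinct non-empty values, as before:
distinct_sql = ('SELECT DISTINCT "status"::TEXT AS val '
                'FROM "TrusteeDocument" WHERE "mandateId" = %s '
                'AND "status" IS NOT NULL AND "status"::TEXT != \'\' '
                'ORDER BY val')
# New existence probe: if any NULL/empty row matches the same scope,
# None is appended to the result, which the filter UI shows as "(Leer)".
empty_probe = ('SELECT 1 FROM "TrusteeDocument" WHERE "mandateId" = %s '
               'AND ("status" IS NULL OR "status"::TEXT = \'\') LIMIT 1')
```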
-def _buildFilesScopeWhereClause(
+def buildFilesScopeWhereClause(
currentUser: User,
table: str,
connector,
@@ -673,7 +689,7 @@ def _buildFilesScopeWhereClause(
if instances:
effectiveMandateId = instances[0].get("mandateId") or ""
except Exception as e:
- logger.warning(f"_buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}")
+ logger.warning(f"buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}")
scopeParts: List[str] = []
scopeValues: List = []
@@ -757,7 +773,7 @@ def buildRbacWhereClause(
namespaceAll = TABLE_NAMESPACE.get(table, "system")
# Files: scope-based context filtering applies even with ALL access
if namespaceAll == "files":
- return _buildFilesScopeWhereClause(
+ return buildFilesScopeWhereClause(
currentUser, table, connector, mandateId, featureInstanceId,
baseConditions, baseValues,
)
@@ -811,7 +827,7 @@ def buildRbacWhereClause(
# - scope='featureInstance' → visible to users with access to that instance
# - scope='personal' → only visible to owner (sysCreatedBy)
if namespace == "files":
- return _buildFilesScopeWhereClause(
+ return buildFilesScopeWhereClause(
currentUser, table, connector, mandateId, featureInstanceId,
baseConditions, baseValues,
)
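Note on the "(Leer)" contract introduced above: distinct-value lookups now
append ``None`` as the last list entry whenever at least one row holds a
NULL/empty value, so the frontend can offer an explicit "(Leer)" (empty)
filter choice. A minimal consumer-side sketch, with illustrative values that
are not part of this patch:

    # Trailing None signals "some rows are empty" (values illustrative).
    values = ["Bern", "Zurich", None]           # as returned by the endpoint
    hasEmptyOption = bool(values) and values[-1] is None
    choices = [v for v in values if v is not None]
    if hasEmptyOption:
        choices.append("(Leer)")                # rendered as the empty-value filter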
diff --git a/modules/routes/routeAdminDemoConfig.py b/modules/routes/routeAdminDemoConfig.py
index d893c205..db37e775 100644
--- a/modules/routes/routeAdminDemoConfig.py
+++ b/modules/routes/routeAdminDemoConfig.py
@@ -28,9 +28,9 @@ def listDemoConfigs(
currentUser: User = Depends(requirePlatformAdmin),
) -> dict:
"""List all available demo configurations."""
- from modules.demoConfigs import _getAvailableDemoConfigs
+ from modules.demoConfigs import getAvailableDemoConfigs
- configs = _getAvailableDemoConfigs()
+ configs = getAvailableDemoConfigs()
return {
"configs": [cfg.toDict() for cfg in configs.values()],
}
@@ -44,9 +44,9 @@ def loadDemoConfig(
currentUser: User = Depends(requirePlatformAdmin),
) -> dict:
"""Load (create) a demo configuration. Idempotent."""
- from modules.demoConfigs import _getDemoConfigByCode
+ from modules.demoConfigs import getDemoConfigByCode
- config = _getDemoConfigByCode(code)
+ config = getDemoConfigByCode(code)
if not config:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
@@ -69,9 +69,9 @@ def removeDemoConfig(
currentUser: User = Depends(requirePlatformAdmin),
) -> dict:
"""Remove all data created by a demo configuration."""
- from modules.demoConfigs import _getDemoConfigByCode
+ from modules.demoConfigs import getDemoConfigByCode
- config = _getDemoConfigByCode(code)
+ config = getDemoConfigByCode(code)
if not config:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
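The demo-config registry getters drop their leading underscore because routes
now consume them across module boundaries. Usage sketch (the demo code string
is illustrative, not from this patch):

    from modules.demoConfigs import getAvailableDemoConfigs, getDemoConfigByCode

    configs = getAvailableDemoConfigs()        # dict keyed by demo code
    payload = [cfg.toDict() for cfg in configs.values()]

    cfg = getDemoConfigByCode("someDemo")      # falsy when the code is unknown
    if not cfg:
        pass                                   # the route answers 404 here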
diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py
index 66682464..9634dd0d 100644
--- a/modules/routes/routeAdminFeatures.py
+++ b/modules/routes/routeAdminFeatures.py
@@ -18,7 +18,7 @@ import json
import math
from pydantic import BaseModel, Field
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
-from modules.routes.routeHelpers import _applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory
+from modules.routes.routeHelpers import applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory
from modules.auth import limiter, getRequestContext, RequestContext, requirePlatformAdmin
from modules.datamodels.datamodelUam import User, UserInDB
@@ -481,7 +481,7 @@ def list_feature_instances(
return handleIdsInMemory(items, pagination)
if paginationParams:
- filtered = _applyFiltersAndSort(items, paginationParams)
+ filtered = applyFiltersAndSort(items, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
@@ -1019,7 +1019,7 @@ def list_template_roles(
if mode == "ids":
return handleIdsInMemory(enriched, pagination)
- filtered = _applyFiltersAndSort(enriched, paginationParams)
+ filtered = applyFiltersAndSort(enriched, paginationParams)
if paginationParams:
totalItems = len(filtered)
@@ -1223,7 +1223,7 @@ def list_feature_instance_users(
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
- filtered = _applyFiltersAndSort(items, paginationParams)
+ filtered = applyFiltersAndSort(items, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
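The three list endpoints above share the same in-memory pagination arithmetic
after ``applyFiltersAndSort``. A condensed sketch of that pattern
(``paginateSlice`` is a hypothetical helper, not part of this patch):

    import math

    def paginateSlice(filtered: list, page: int, pageSize: int) -> dict:
        # Ceil-based page count, then a plain list slice for the window.
        totalItems = len(filtered)
        totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
        startIdx = (page - 1) * pageSize
        return {
            "items": filtered[startIdx:startIdx + pageSize],
            "totalItems": totalItems,
            "totalPages": totalPages,
        }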
diff --git a/modules/routes/routeAudit.py b/modules/routes/routeAudit.py
index 3634ff9d..0e686297 100644
--- a/modules/routes/routeAudit.py
+++ b/modules/routes/routeAudit.py
@@ -69,14 +69,24 @@ def _applySortFilterSearch(
return items
-def _distinctColumnValues(items: List[Dict[str, Any]], column: str) -> List[str]:
- """Extract sorted distinct non-empty string values for a column."""
+def _distinctColumnValues(items: List[Dict[str, Any]], column: str) -> List[Optional[str]]:
+ """Extract sorted distinct values for a column.
+
+ Includes ``None`` as the last entry when at least one row has a null/empty
+ value — this enables the "(Leer)" filter option in the frontend.
+ """
vals = set()
+ hasEmpty = False
for r in items:
v = r.get(column)
- if v is not None and v != "":
- vals.add(str(v))
- return sorted(vals)
+ if v is None or v == "":
+ hasEmpty = True
+ continue
+ vals.add(str(v))
+ result: List[Optional[str]] = sorted(vals)
+ if hasEmpty:
+ result.append(None)
+ return result
def _enrichUserAndInstanceLabels(
@@ -87,46 +97,32 @@ def _enrichUserAndInstanceLabels(
instanceKey: str = "featureInstanceId",
instanceLabelKey: str = "instanceLabel",
) -> None:
- """Resolve userId → username and featureInstanceId → label in-place."""
- userIds = set()
- instanceIds = set()
- for r in items:
- uid = r.get(userKey)
- if uid and not r.get(usernameKey):
- userIds.add(uid)
- iid = r.get(instanceKey)
- if iid:
- instanceIds.add(iid)
+ """Resolve userId -> username and featureInstanceId -> label in-place.
- userMap: Dict[str, str] = {}
- instanceMap: Dict[str, str] = {}
+ Uses the central resolvers from routeHelpers. Returns None (not the raw ID)
+ for unresolvable entries so the frontend can distinguish "resolved" from
+ "missing".
+ """
+ from modules.routes.routeHelpers import resolveUserLabels, resolveInstanceLabels
- try:
- from modules.interfaces.interfaceDbApp import getInterface
- appIf = getInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- )
- if userIds:
- users = appIf.getUsersByIds(list(userIds))
- for uid, u in users.items():
- name = getattr(u, "displayName", None) or getattr(u, "email", None) or uid
- userMap[uid] = name
- if instanceIds:
- for iid in instanceIds:
- fi = appIf.getFeatureInstance(iid)
- if fi:
- instanceMap[iid] = getattr(fi, "label", None) or getattr(fi, "featureCode", None) or iid
- except Exception as e:
- logger.debug("_enrichUserAndInstanceLabels: %s", e)
+ userIds = list({r.get(userKey) for r in items if r.get(userKey) and not r.get(usernameKey)})
+ instanceIds = list({r.get(instanceKey) for r in items if r.get(instanceKey)})
+
+ userMap: Dict[str, Optional[str]] = {}
+ instanceMap: Dict[str, Optional[str]] = {}
+
+ if userIds:
+ userMap = resolveUserLabels(userIds)
+ if instanceIds:
+ instanceMap = resolveInstanceLabels(instanceIds)
for r in items:
uid = r.get(userKey)
if uid and not r.get(usernameKey) and uid in userMap:
r[usernameKey] = userMap[uid]
iid = r.get(instanceKey)
- if iid and iid in instanceMap:
- r[instanceLabelKey] = instanceMap[iid]
+ if iid:
+ r[instanceLabelKey] = instanceMap.get(iid)
def _requireAuditAccess(context: RequestContext):
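Worked example of the reworked ``_distinctColumnValues`` (rows illustrative):

    rows = [
        {"action": "login"},
        {"action": "logout"},
        {"action": None},      # null value
        {"action": ""},        # empty string
        {"action": "login"},
    ]
    # _distinctColumnValues(rows, "action") now returns:
    #   ["login", "logout", None]
    # The trailing None is what drives the "(Leer)" option in the frontend.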
diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py
index 382b709a..e3d26352 100644
--- a/modules/routes/routeBilling.py
+++ b/modules/routes/routeBilling.py
@@ -20,7 +20,7 @@ from pydantic import BaseModel, Field
from modules.auth import limiter, requirePlatformAdmin, getRequestContext, RequestContext
# Import billing components
-from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface, _getRootInterface
+from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface, getRootInterface
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService
import json
import math
@@ -140,44 +140,46 @@ def _getBillingDataScope(user) -> BillingDataScope:
def _isAdminOfMandate(ctx: RequestContext, targetMandateId: str) -> bool:
- """Check if user is PlatformAdmin or admin of the specified mandate."""
+ """Check if user is PlatformAdmin or admin of the specified mandate.
+
+ Fail-loud: any DB/lookup error is logged at ERROR and re-raised. We never
+ silently return False — that would mask infrastructure outages as "no
+ permission" and produce confusing 403s instead of actionable 500s.
+ """
if ctx.isPlatformAdmin:
return True
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- userMandates = rootInterface.getUserMandates(str(ctx.user.id))
- for um in userMandates:
- if str(getattr(um, 'mandateId', None)) != str(targetMandateId):
- continue
- if not getattr(um, 'enabled', True):
- continue
- umId = str(getattr(um, 'id', ''))
- roleIds = rootInterface.getRoleIdsForUserMandate(umId)
- for roleId in roleIds:
- role = rootInterface.getRole(roleId)
- if role and role.roleLabel == "admin" and not role.featureInstanceId:
- return True
- return False
- except Exception:
- return False
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ rootInterface = getRootInterface()
+ userMandates = rootInterface.getUserMandates(str(ctx.user.id))
+ for um in userMandates:
+ if str(getattr(um, 'mandateId', None)) != str(targetMandateId):
+ continue
+ if not getattr(um, 'enabled', True):
+ continue
+ umId = str(getattr(um, 'id', ''))
+ roleIds = rootInterface.getRoleIdsForUserMandate(umId)
+ for roleId in roleIds:
+ role = rootInterface.getRole(roleId)
+ if role and role.roleLabel == "admin" and not role.featureInstanceId:
+ return True
+ return False
def _isMemberOfMandate(ctx: RequestContext, targetMandateId: str) -> bool:
- """Check if user has any enabled membership in the specified mandate."""
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- userMandates = rootInterface.getUserMandates(str(ctx.user.id))
- for um in userMandates:
- if str(getattr(um, 'mandateId', None)) != str(targetMandateId):
- continue
- if not getattr(um, 'enabled', True):
- continue
- return True
- return False
- except Exception:
- return False
+ """Check if user has any enabled membership in the specified mandate.
+
+ Fail-loud: see _isAdminOfMandate above for the same rationale.
+ """
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ rootInterface = getRootInterface()
+ userMandates = rootInterface.getUserMandates(str(ctx.user.id))
+ for um in userMandates:
+ if str(getattr(um, 'mandateId', None)) != str(targetMandateId):
+ continue
+ if not getattr(um, 'enabled', True):
+ continue
+ return True
+ return False
@@ -887,7 +889,7 @@ def confirmCheckoutSession(
if not _isAdminOfMandate(ctx, mandate_id):
raise HTTPException(status_code=403, detail=routeApiMsg("Mandate admin role required"))
- root_billing_interface = _getRootInterface()
+ root_billing_interface = getRootInterface()
return _creditStripeSessionIfNeeded(root_billing_interface, session_dict, eventId=None)
except HTTPException:
raise
@@ -957,10 +959,10 @@ async def stripeWebhook(
sessionMode = session.get("mode") if hasattr(session, "get") else getattr(session, "mode", None)
if sessionMode == "subscription":
- _handleSubscriptionCheckoutCompleted(session, event_id)
+ handleSubscriptionCheckoutCompleted(session, event_id)
return {"received": True}
- billingInterface = _getRootInterface()
+ billingInterface = getRootInterface()
if billingInterface.getStripeWebhookEventByEventId(event_id):
logger.info(f"Stripe event {event_id} already processed, skipping")
return {"received": True}
@@ -997,11 +999,11 @@ async def stripeWebhook(
return {"received": True}
-def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
+def handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
"""Handle checkout.session.completed for mode=subscription.
Resolves the local PENDING record by ID from webhook metadata and transitions it."""
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface
- from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, _getPlan
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface
+ from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, getPlan
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import (
getService as getSubscriptionService,
_notifySubscriptionChange,
@@ -1033,8 +1035,16 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
mandateId = metadata.get("mandateId")
planKey = metadata.get("planKey", "")
platformUrl = platformUrl or metadata.get("platformUrl", "")
- except Exception:
- pass
+ except Exception as e:
+ # Stripe lookup is the only way to recover the metadata at this
+ # point — if it fails we MUST surface it, otherwise the webhook
+ # later short-circuits with "missing metadata" and the user
+ # silently gets stuck in PENDING.
+ logger.error(
+ "Stripe Subscription.retrieve(%s) failed during checkout "
+ "metadata recovery: %s", stripeSub, e,
+ )
+ raise
stripeSubId = session.get("subscription")
@@ -1083,7 +1093,17 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
elif priceMapping and priceId == priceMapping.stripePriceIdInstances:
stripeData["stripeItemIdInstances"] = item["id"]
except Exception as e:
- logger.error("Error retrieving Stripe subscription %s: %s", stripeSubId, e)
+                    # If this error were swallowed, the activation would
+                    # still complete (status flips to ACTIVE/SCHEDULED
+                    # below), but the billing periods and Stripe item-IDs
+                    # would be missing on the local record, which breaks
+                    # later add-on billing and renewal accounting. Re-raise
+                    # so Stripe retries the webhook instead of silently
+                    # shipping a broken row.
+ logger.error(
+ "Error retrieving Stripe subscription %s during checkout "
+ "completion (will be retried by Stripe): %s",
+ stripeSubId, e,
+ )
+ raise
if stripeData:
subInterface.updateFields(subscriptionRecordId, stripeData)
@@ -1136,12 +1156,12 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
subService.invalidateCache(mandateId)
if toStatus == SubscriptionStatusEnum.ACTIVE:
- plan = _getPlan(planKey)
+ plan = getPlan(planKey)
updatedSub = subInterface.getById(subscriptionRecordId)
_notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=updatedSub, platformUrl=platformUrl)
try:
- billingIf = _getRootInterface()
+ billingIf = getRootInterface()
billingIf.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
except Exception as ex:
logger.error("creditSubscriptionBudget on activation failed: %s", ex)
@@ -1155,8 +1175,8 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
def _handleSubscriptionWebhook(event) -> None:
"""Process Stripe subscription webhook events.
All record resolution is by stripeSubscriptionId — no mandate-based guessing."""
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface
- from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, _getPlan
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface
+ from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, getPlan
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import (
getService as getSubscriptionService,
_notifySubscriptionChange,
@@ -1205,11 +1225,11 @@ def _handleSubscriptionWebhook(event) -> None:
subInterface.transitionStatus(subId, SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE)
subService.invalidateCache(mandateId)
planKey = sub.get("planKey", "")
- plan = _getPlan(planKey)
+ plan = getPlan(planKey)
refreshedSub = subInterface.getById(subId)
_notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=refreshedSub, platformUrl=webhookPlatformUrl)
try:
- _getRootInterface().creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
+ getRootInterface().creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
except Exception as ex:
logger.error("creditSubscriptionBudget SCHEDULED->ACTIVE failed: %s", ex)
logger.info("SCHEDULED -> ACTIVE for sub %s (mandate %s)", subId, mandateId)
@@ -1245,7 +1265,7 @@ def _handleSubscriptionWebhook(event) -> None:
scheduled["id"], SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE,
)
subService.invalidateCache(mandateId)
- plan = _getPlan(scheduled.get("planKey", ""))
+ plan = getPlan(scheduled.get("planKey", ""))
refreshedScheduled = subInterface.getById(scheduled["id"])
_notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=refreshedScheduled, platformUrl=webhookPlatformUrl)
logger.info("Promoted SCHEDULED sub %s -> ACTIVE (mandate %s)", scheduled["id"], mandateId)
@@ -1256,7 +1276,7 @@ def _handleSubscriptionWebhook(event) -> None:
if currentStatus == SubscriptionStatusEnum.ACTIVE:
subInterface.transitionStatus(subId, SubscriptionStatusEnum.ACTIVE, SubscriptionStatusEnum.PAST_DUE)
subService.invalidateCache(mandateId)
- plan = _getPlan(sub.get("planKey", ""))
+ plan = getPlan(sub.get("planKey", ""))
_notifySubscriptionChange(mandateId, "payment_failed", plan, subscriptionRecord=sub, platformUrl=webhookPlatformUrl)
logger.info("Payment failed for sub %s (mandate %s)", subId, mandateId)
@@ -1283,7 +1303,7 @@ def _handleSubscriptionWebhook(event) -> None:
period_start_at = datetime.fromtimestamp(int(period_ts), tz=timezone.utc)
periodLabel = period_start_at.strftime("%Y-%m-%d")
try:
- billing_if = _getRootInterface()
+ billing_if = getRootInterface()
billing_if.resetStorageBillingPeriod(mandateId, period_start_at)
billing_if.reconcileMandateStorageBilling(mandateId)
except Exception as ex:
@@ -1291,7 +1311,7 @@ def _handleSubscriptionWebhook(event) -> None:
planKey = sub.get("planKey", "")
try:
- billing_if = _getRootInterface()
+ billing_if = getRootInterface()
billing_if.creditSubscriptionBudget(mandateId, planKey, periodLabel=periodLabel or "Periodenverlängerung")
except Exception as ex:
logger.error("creditSubscriptionBudget on invoice.paid failed: %s", ex)
@@ -1408,28 +1428,21 @@ def getUsersForMandate(
def _attachCreatedByUserNamesToTransactionRows(rows: List[Dict[str, Any]]) -> None:
- """Resolve createdByUserId to userName using root app interface (sysadmin transaction views)."""
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
+ """Resolve createdByUserId to userName using central FK resolvers.
- appRoot = getRootInterface()
- userNames: Dict[str, str] = {}
- for row in rows:
- uid = row.get("createdByUserId")
- if not uid:
- row["userName"] = ""
- continue
- if uid not in userNames:
- try:
- u = appRoot.getUser(uid)
- userNames[uid] = u.username if u else uid[:8]
- except Exception:
- userNames[uid] = uid[:8]
- row["userName"] = userNames.get(uid, "")
- except Exception:
- for row in rows:
- uid = row.get("createdByUserId")
- row["userName"] = uid[:8] if uid else ""
+ Returns None (not a truncated UUID) for unresolvable IDs so the frontend
+ renders an explicit NA() indicator instead of a misleading 8-char snippet.
+ """
+ from modules.routes.routeHelpers import resolveUserLabels
+
+ userIds = list({r.get("createdByUserId") for r in rows if r.get("createdByUserId")})
+ userMap: Dict[str, Optional[str]] = {}
+ if userIds:
+ userMap = resolveUserLabels(userIds)
+
+ for row in rows:
+ uid = row.get("createdByUserId")
+ row["userName"] = userMap.get(uid) if uid else None
def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]:
@@ -1717,18 +1730,13 @@ def getUserViewStatistics(
for acc in allAccounts:
accountToMandate[acc.get("id", "")] = acc.get("mandateId", "")
- from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
- mandateIdsForLookup = list(set(accountToMandate.values()))
- mandateMap: Dict[str, str] = {}
- if mandateIdsForLookup:
- rootIface = getAppInterface(ctx.user)
- mandatesById = rootIface.getMandatesByIds(mandateIdsForLookup)
- for mid, m in mandatesById.items():
- mandateMap[mid] = getattr(m, "name", mid) or mid
+ from modules.routes.routeHelpers import resolveMandateLabels
+ mandateIdsForLookup = list({v for v in accountToMandate.values() if v})
+ mandateMap: Dict[str, Optional[str]] = resolveMandateLabels(mandateIdsForLookup) if mandateIdsForLookup else {}
def _mandateName(accountId: str) -> str:
mid = accountToMandate.get(accountId, "")
- return mandateMap.get(mid, mid or "unknown")
+        return (mandateMap.get(mid) or f"NA({mid})") if mid else "unknown"
costByMandate: Dict[str, float] = {}
for accId, total in agg.get("costByAccountId", {}).items():
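With the parenthesised return above, ``_mandateName`` distinguishes three
cases. Illustrative values (not from this patch):

    mandateMap = {"m-1": "Acme AG", "m-2": None}   # None = unresolvable
    accountToMandate = {"a-1": "m-1", "a-2": "m-2", "a-3": ""}

    # _mandateName("a-1") -> "Acme AG"   (resolved label)
    # _mandateName("a-2") -> "NA(m-2)"   (explicit not-available marker)
    # _mandateName("a-3") -> "unknown"   (account has no mandate at all)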
diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py
index 290be722..05c8aa9d 100644
--- a/modules/routes/routeDataConnections.py
+++ b/modules/routes/routeDataConnections.py
@@ -127,7 +127,7 @@ def get_auth_authority_options(
# CRUD ENDPOINTS
# ============================================================================
-@router.get("/", response_model=PaginatedResponse[UserConnection])
+@router.get("/")
@limiter.limit("30/minute")
async def get_connections(
request: Request,
@@ -135,7 +135,7 @@ async def get_connections(
mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
currentUser: User = Depends(getCurrentUser)
-) -> PaginatedResponse[UserConnection]:
+):
"""Get connections for the current user with optional pagination, sorting, and filtering.
SECURITY: This endpoint is secure - users can only see their own connections.
@@ -151,7 +151,7 @@ async def get_connections(
- GET /api/connections/?mode=filterValues&column=status
- GET /api/connections/?mode=ids
"""
- from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory
+ from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels
def _buildEnhancedItems():
interface = getInterface(currentUser)
@@ -252,27 +252,13 @@ async def get_connections(
}
enhanced_connections_dict.append(connection_dict)
- # If no pagination requested, return all items
+ enrichRowsWithFkLabels(enhanced_connections_dict, UserConnection)
+
if paginationParams is None:
- # Convert back to UserConnection objects (enum strings are already in dict)
- items = []
- for conn_dict in enhanced_connections_dict:
- conn_dict_copy = dict(conn_dict)
- if "authority" in conn_dict_copy and isinstance(conn_dict_copy["authority"], str):
- try:
- conn_dict_copy["authority"] = AuthAuthority(conn_dict_copy["authority"])
- except ValueError:
- pass
- if "status" in conn_dict_copy and isinstance(conn_dict_copy["status"], str):
- try:
- conn_dict_copy["status"] = ConnectionStatus(conn_dict_copy["status"])
- except ValueError:
- pass
- items.append(UserConnection(**conn_dict_copy))
- return PaginatedResponse(
- items=items,
- pagination=None
- )
+ return {
+ "items": enhanced_connections_dict,
+ "pagination": None,
+ }
# Apply filtering if provided
if paginationParams.filters:
@@ -292,43 +278,24 @@ async def get_connections(
paginationParams.sort
)
- # Count total items after filters
totalItems = len(enhanced_connections_dict)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
- # Apply pagination (skip/limit)
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
paged_connections = enhanced_connections_dict[startIdx:endIdx]
- # Convert back to UserConnection objects (convert enum strings back to enums)
- items = []
- for conn_dict in paged_connections:
- # Convert enum strings back to enum objects
- conn_dict_copy = dict(conn_dict)
- if "authority" in conn_dict_copy and isinstance(conn_dict_copy["authority"], str):
- try:
- conn_dict_copy["authority"] = AuthAuthority(conn_dict_copy["authority"])
- except ValueError:
- pass # Keep as string if invalid
- if "status" in conn_dict_copy and isinstance(conn_dict_copy["status"], str):
- try:
- conn_dict_copy["status"] = ConnectionStatus(conn_dict_copy["status"])
- except ValueError:
- pass # Keep as string if invalid
- items.append(UserConnection(**conn_dict_copy))
-
- return PaginatedResponse(
- items=items,
- pagination=PaginationMetadata(
+ return {
+ "items": paged_connections,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
except HTTPException:
raise
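The handler now returns a plain dict instead of ``PaginatedResponse[UserConnection]``
so the ``{field}Label`` keys added by ``enrichRowsWithFkLabels`` survive
serialisation. Response shape sketch (payload illustrative; the ``userIdLabel``
key assumes the model annotates ``userId`` with ``fk_model="User"``):

    examplePayload = {
        "items": [
            {
                "id": "c-1",
                "status": "active",
                "userId": "u-42",
                "userIdLabel": "alice",   # None when unresolvable, never the raw ID
            },
        ],
        "pagination": {
            "currentPage": 1,
            "pageSize": 25,
            "totalItems": 1,
            "totalPages": 1,
            "sort": None,
            "filters": None,
        },
    }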
diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py
index 82cf1624..11b90f09 100644
--- a/modules/routes/routeDataFiles.py
+++ b/modules/routes/routeDataFiles.py
@@ -17,6 +17,7 @@ from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.shared.i18nRegistry import apiRouteContext
+from modules.routes.routeHelpers import enrichRowsWithFkLabels
routeApiMsg = apiRouteContext("routeDataFiles")
# Configure logger
@@ -220,7 +221,7 @@ router = APIRouter(
}
)
-@router.get("/list", response_model=PaginatedResponse[FileItem])
+@router.get("/list")
@limiter.limit("120/minute")
def get_files(
request: Request,
@@ -229,7 +230,7 @@ def get_files(
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[FileItem]:
+):
"""
Get files with optional pagination, sorting, and filtering.
@@ -303,24 +304,27 @@ def get_files(
recordFilter = {"folderId": fVal}
result = managementInterface.getAllFiles(pagination=paginationParams, recordFilter=recordFilter)
-
+
+ def _filesToDicts(items):
+ return [f.model_dump() if hasattr(f, "model_dump") else (dict(f) if not isinstance(f, dict) else f) for f in items]
+
if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_filesToDicts(result.items), FileItem)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
+ items = result if isinstance(result, list) else (result.items if hasattr(result, "items") else [result])
+ enriched = enrichRowsWithFkLabels(_filesToDicts(items), FileItem)
+ return {"items": enriched, "pagination": None}
except HTTPException:
raise
except Exception as e:
@@ -1019,14 +1023,14 @@ def updateFileNeutralize(
# ── File endpoints with path parameters (catch-all /{fileId}) ─────────────────
-@router.get("/{fileId}", response_model=FileItem)
+@router.get("/{fileId}")
@limiter.limit("30/minute")
def get_file(
request: Request,
fileId: str = Path(..., description="ID of the file"),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext)
-) -> FileItem:
+):
"""Get a file. Resolves the file's mandate/instance scope automatically."""
try:
_mgmt, fileData = _resolveFileWithScope(currentUser, context, fileId)
@@ -1036,7 +1040,9 @@ def get_file(
detail=f"File with ID {fileId} not found"
)
- return fileData
+ fileDict = fileData.model_dump() if hasattr(fileData, "model_dump") else dict(fileData)
+ enriched = enrichRowsWithFkLabels([fileDict], FileItem)
+ return enriched[0]
except interfaceDbManagement.FileNotFoundError as e:
logger.warning(f"File not found: {str(e)}")
diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py
index 2bed0169..7972181d 100644
--- a/modules/routes/routeDataMandates.py
+++ b/modules/routes/routeDataMandates.py
@@ -22,7 +22,7 @@ from modules.auth import limiter, requirePlatformAdmin, getRequestContext, getCu
# Import interfaces
import modules.interfaces.interfaceDbApp as interfaceDbApp
-from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRootInterface
+from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRootInterface
from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.shared.auditLogger import audit_logger
@@ -318,7 +318,7 @@ def create_mandate(
from modules.datamodels.datamodelSubscription import (
MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS,
)
- from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot
from datetime import datetime, timezone, timedelta
planKey = mandateData.get("planKey", "TRIAL_14D")
@@ -660,7 +660,7 @@ def list_mandate_users(
from modules.routes.routeHelpers import (
handleFilterValuesInMemory, handleIdsInMemory,
- _applyFiltersAndSort as _sharedApplyFiltersAndSort,
+ applyFiltersAndSort as _sharedApplyFiltersAndSort,
paginateInMemory,
)
@@ -674,13 +674,23 @@ def list_mandate_users(
if paginationParams:
paginationParamsObj = None
- try:
- paginationDict = json.loads(pagination) if pagination else None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ except json.JSONDecodeError as e:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Invalid 'pagination' query: not valid JSON ({e.msg})",
+ )
if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParamsObj = PaginationParams(**paginationDict)
- except Exception:
- pass
+ try:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParamsObj = PaginationParams(**paginationDict)
+ except Exception as e:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Invalid 'pagination' payload: {e}",
+ )
filtered = _sharedApplyFiltersAndSort(result, paginationParamsObj)
totalItems = len(filtered)
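Malformed pagination input now fails with an explicit 400 instead of being
silently ignored. Hypothetical test sketch (the endpoint path and ``client``
fixture are assumptions):

    resp = client.get(
        "/api/mandates/m-1/users",
        params={"pagination": "{broken"},      # not valid JSON
    )
    assert resp.status_code == 400
    assert "not valid JSON" in resp.json()["detail"]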
diff --git a/modules/routes/routeDataPrompts.py b/modules/routes/routeDataPrompts.py
index 79dc8d72..ee99b912 100644
--- a/modules/routes/routeDataPrompts.py
+++ b/modules/routes/routeDataPrompts.py
@@ -44,20 +44,25 @@ def get_prompts(
- filterValues: distinct values for a column (cross-filtered)
- ids: all IDs matching current filters
"""
- from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory
+ from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels
+
+ def _promptsToEnrichedDicts(promptItems):
+ dicts = [r.model_dump() if hasattr(r, 'model_dump') else (dict(r) if not isinstance(r, dict) else r) for r in promptItems]
+ enrichRowsWithFkLabels(dicts, Prompt)
+ return dicts
if mode == "filterValues":
if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
managementInterface = interfaceDbManagement.getInterface(currentUser)
result = managementInterface.getAllPrompts(pagination=None)
- items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in result]
+ items = _promptsToEnrichedDicts(result)
return handleFilterValuesInMemory(items, column, pagination)
if mode == "ids":
managementInterface = interfaceDbManagement.getInterface(currentUser)
result = managementInterface.getAllPrompts(pagination=None)
- items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in result]
+ items = _promptsToEnrichedDicts(result)
return handleIdsInMemory(items, pagination)
paginationParams = None
@@ -74,22 +79,24 @@ def get_prompts(
result = managementInterface.getAllPrompts(pagination=paginationParams)
if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ items = _promptsToEnrichedDicts(result.items)
+ return {
+ "items": items,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
+ items = _promptsToEnrichedDicts(result)
+ return {
+ "items": items,
+ "pagination": None,
+ }
@router.post("", response_model=Prompt)
diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py
index ea796aab..67156291 100644
--- a/modules/routes/routeDataUsers.py
+++ b/modules/routes/routeDataUsers.py
@@ -25,12 +25,17 @@ from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.shared.i18nRegistry import apiRouteContext
+from modules.routes.routeHelpers import enrichRowsWithFkLabels
routeApiMsg = apiRouteContext("routeDataUsers")
# Configure logger
logger = logging.getLogger(__name__)
+def _usersToDicts(items) -> list:
+ return [u.model_dump() if hasattr(u, "model_dump") else (dict(u) if not isinstance(u, dict) else u) for u in items]
+
+
def _isAdminForUser(context: RequestContext, targetUserId: str) -> bool:
"""
Check if the current user has admin rights for the target user.
@@ -187,7 +192,7 @@ def get_user_options(
# CRUD ENDPOINTS
# ============================================================================
-@router.get("/", response_model=PaginatedResponse[User])
+@router.get("/")
@limiter.limit("30/minute")
def get_users(
request: Request,
@@ -195,7 +200,7 @@ def get_users(
mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[User]:
+):
"""
Get users with optional pagination, sorting, and filtering.
MULTI-TENANT: mandateId from X-Mandate-Id header determines scope.
@@ -236,48 +241,44 @@ def get_users(
# Get users for specific mandate using getUsersByMandate
result = appInterface.getUsersByMandate(str(context.mandateId), paginationParams)
- # getUsersByMandate returns PaginatedResult if pagination was provided
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=result.currentPage,
pageSize=result.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
else:
- # No pagination - result is a list
users = result if isinstance(result, list) else result.items if hasattr(result, 'items') else []
- return PaginatedResponse(
- items=users,
- pagination=None
- )
+ enriched = enrichRowsWithFkLabels(_usersToDicts(users), User)
+ return {"items": enriched, "pagination": None}
elif context.isPlatformAdmin:
# PlatformAdmin without mandateId — DB-level pagination via interface
result = appInterface.getAllUsers(paginationParams)
if paginationParams and hasattr(result, 'items'):
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
+ enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User)
+ return {
+ "items": enriched,
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
else:
users = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else [])
- return PaginatedResponse(
- items=users,
- pagination=None
- )
+ enriched = enrichRowsWithFkLabels(_usersToDicts(users), User)
+ return {"items": enriched, "pagination": None}
else:
# Non-SysAdmin without mandateId: aggregate users across all admin mandates
rootInterface = getRootInterface()
@@ -316,34 +317,30 @@ def get_users(
for u in batchUsers.values()
]
- from modules.routes.routeHelpers import _applyFiltersAndSort as _applyFiltersAndSortHelper
+ from modules.routes.routeHelpers import applyFiltersAndSort as _applyFiltersAndSortHelper
filteredUsers = _applyFiltersAndSortHelper(allUsers, paginationParams)
- users = [User(**u) for u in filteredUsers]
+ enriched = enrichRowsWithFkLabels(filteredUsers, User)
if paginationParams:
import math
- totalItems = len(users)
+ totalItems = len(enriched)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
- paginatedUsers = users[startIdx:endIdx]
- return PaginatedResponse(
- items=paginatedUsers,
- pagination=PaginationMetadata(
+ return {
+ "items": enriched[startIdx:endIdx],
+ "pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
- )
- )
+ ).model_dump(),
+ }
else:
- return PaginatedResponse(
- items=users,
- pagination=None
- )
+ return {"items": enriched, "pagination": None}
except HTTPException:
raise
except Exception as e:
@@ -753,10 +750,10 @@ def send_password_link(
expiryHours = int(APP_CONFIG.get("Auth_RESET_TOKEN_EXPIRY_HOURS", "24"))
try:
- from modules.routes.routeSecurityLocal import _buildAuthEmailHtml, _sendAuthEmail
+ from modules.routes.routeSecurityLocal import buildAuthEmailHtml, sendAuthEmail
emailSubject = "PowerOn - Passwort setzen"
- emailHtml = _buildAuthEmailHtml(
+ emailHtml = buildAuthEmailHtml(
greeting=f"Hallo {targetUser.fullName or targetUser.username}",
bodyLines=[
"Ein Administrator hat einen Link zum Setzen Ihres Passworts angefordert.",
@@ -770,7 +767,7 @@ def send_password_link(
footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie diese Anforderung nicht erwartet haben, kontaktieren Sie bitte Ihren Administrator.",
)
- emailSent = _sendAuthEmail(
+ emailSent = sendAuthEmail(
recipient=targetUser.email,
subject=emailSubject,
message="",
diff --git a/modules/routes/routeHelpers.py b/modules/routes/routeHelpers.py
index de2f863b..19bfdb8e 100644
--- a/modules/routes/routeHelpers.py
+++ b/modules/routes/routeHelpers.py
@@ -12,7 +12,7 @@ Provides unified logic for:
import copy
import json
import logging
-from typing import Any, Dict, List, Optional, Callable
+from typing import Any, Dict, List, Optional, Callable, Union
from fastapi.responses import JSONResponse
@@ -29,64 +29,183 @@ logger = logging.getLogger(__name__)
# Central FK label resolvers (cross-DB)
# ---------------------------------------------------------------------------
-def _resolveMandateLabels(ids: List[str]) -> Dict[str, str]:
+def resolveMandateLabels(ids: List[str]) -> Dict[str, Optional[str]]:
+ """Resolve mandate IDs to labels. Returns None (not the ID!) for
+ unresolvable entries so the caller can distinguish "resolved" from "missing".
+ """
from modules.interfaces.interfaceDbApp import getRootInterface
rootIface = getRootInterface()
mMap = rootIface.getMandatesByIds(ids)
- return {
- mid: getattr(m, "label", None) or getattr(m, "name", mid) or mid
- for mid, m in mMap.items()
- }
+ result: Dict[str, Optional[str]] = {}
+ for mid in ids:
+ m = mMap.get(mid)
+ label = (getattr(m, "label", None) or getattr(m, "name", None)) if m else None
+ if not label:
+ logger.warning("resolveMandateLabels: no label for id=%s (found=%s)", mid, m is not None)
+ result[mid] = label or None
+ return result
-def _resolveInstanceLabels(ids: List[str]) -> Dict[str, str]:
+def resolveInstanceLabels(ids: List[str]) -> Dict[str, Optional[str]]:
+ """Resolve feature-instance IDs to labels. Returns None for unresolvable."""
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.interfaces.interfaceFeatures import getFeatureInterface
rootIface = getRootInterface()
featureIface = getFeatureInterface(rootIface.db)
- result: Dict[str, str] = {}
+ result: Dict[str, Optional[str]] = {}
for iid in ids:
fi = featureIface.getFeatureInstance(iid)
- result[iid] = fi.label if fi and fi.label else iid
+ label = fi.label if fi and fi.label else None
+ if not label:
+ logger.warning("resolveInstanceLabels: no label for id=%s (found=%s)", iid, fi is not None)
+ result[iid] = label
return result
-def _resolveUserLabels(ids: List[str]) -> Dict[str, str]:
+def resolveUserLabels(ids: List[str]) -> Dict[str, Optional[str]]:
+ """Resolve user IDs to display names. Returns None for unresolvable."""
from modules.interfaces.interfaceDbApp import getRootInterface
rootIface = getRootInterface()
+ from modules.datamodels.datamodelUam import User as _User
+ uniqueIds = list(set(ids))
users = rootIface.db.getRecordset(
- __import__("modules.datamodels.datamodelUam", fromlist=["User"]).User,
- recordFilter={"id": list(set(ids))},
+ _User,
+ recordFilter={"id": uniqueIds},
)
- result: Dict[str, str] = {}
+ if not users and uniqueIds:
+ logger.warning(
+ "resolveUserLabels: query returned 0 users for %d ids (db=%s, table=%s). "
+ "Attempting full table scan...",
+ len(uniqueIds), getattr(rootIface.db, 'dbDatabase', '?'), _User.__name__,
+ )
+ allUsers = rootIface.db.getRecordset(_User)
+ logger.warning(
+ "resolveUserLabels: full scan found %d users total. Looking for ids: %s",
+ len(allUsers or []), uniqueIds[:3],
+ )
+        idSet = set(uniqueIds)
+        users = [u for u in (allUsers or []) if u.get("id") in idSet]
+ result: Dict[str, Optional[str]] = {}
+ found: Dict[str, dict] = {}
for u in (users or []):
uid = u.get("id", "")
- result[uid] = u.get("username") or u.get("email") or uid
+ found[uid] = u
+ for uid in ids:
+ u = found.get(uid)
+ if u:
+ result[uid] = u.get("username") or u.get("email") or None
+ else:
+ logger.warning("resolveUserLabels: user not found for id=%s", uid)
+ result[uid] = None
return result
+def resolveRoleLabels(ids: List[str]) -> Dict[str, Optional[str]]:
+ """Resolve Role.id to roleLabel. Returns None for unresolvable."""
+ if not ids:
+ return {}
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ from modules.datamodels.datamodelRbac import Role as _Role
+ rootIface = getRootInterface()
+ recs = rootIface.db.getRecordset(
+ _Role,
+ recordFilter={"id": list(set(ids))},
+ ) or []
+ out: Dict[str, Optional[str]] = {i: None for i in ids}
+ for r in recs:
+ rid = r.get("id")
+ if rid:
+ out[rid] = r.get("roleLabel") or None
+ for rid in ids:
+ if out.get(rid) is None:
+ logger.warning("resolveRoleLabels: no label for id=%s", rid)
+ return out
+
+
_BUILTIN_FK_RESOLVERS: Dict[str, Callable[[List[str]], Dict[str, str]]] = {
- "Mandate": _resolveMandateLabels,
- "FeatureInstance": _resolveInstanceLabels,
- "User": _resolveUserLabels,
+ "Mandate": resolveMandateLabels,
+ "FeatureInstance": resolveInstanceLabels,
+ "User": resolveUserLabels,
+ "Role": resolveRoleLabels,
}
def _buildLabelResolversFromModel(modelClass: type) -> Dict[str, Callable[[List[str]], Dict[str, str]]]:
"""
- Auto-build labelResolvers dict from fk_model annotations on a Pydantic model.
- Maps field names to resolver functions for all fields that have a known fk_model.
+ Auto-build labelResolvers dict from fk_model / fk_target annotations on a Pydantic model.
+ Maps field names to resolver functions for all fields that have a known FK target.
+ Unlike ``_get_fk_sort_meta`` this does NOT require ``fk_label_field`` — the
+ builtin resolvers already know which column to read.
"""
- from modules.connectors.connectorDbPostgre import _get_fk_sort_meta
- fkMeta = _get_fk_sort_meta(modelClass)
resolvers: Dict[str, Callable[[List[str]], Dict[str, str]]] = {}
- for fieldName, meta in fkMeta.items():
- fkModelName = meta.get("model", "")
- if fkModelName in _BUILTIN_FK_RESOLVERS:
- resolvers[fieldName] = _BUILTIN_FK_RESOLVERS[fkModelName]
+ for name, fieldInfo in modelClass.model_fields.items():
+ extra = fieldInfo.json_schema_extra
+ if not extra or not isinstance(extra, dict):
+ continue
+ fkModel = extra.get("fk_model")
+ tgt = extra.get("fk_target")
+ if not fkModel and isinstance(tgt, dict):
+ fkModel = tgt.get("table")
+ if fkModel and fkModel in _BUILTIN_FK_RESOLVERS:
+ resolvers[name] = _BUILTIN_FK_RESOLVERS[fkModel]
return resolvers
+def enrichRowsWithFkLabels(
+ rows: List[Dict[str, Any]],
+    modelClass: Optional[type] = None,
+ *,
+ labelResolvers: Optional[Dict[str, Callable[[List[str]], Dict[str, Optional[str]]]]] = None,
+ extraResolvers: Optional[Dict[str, Callable[[List[str]], Dict[str, Optional[str]]]]] = None,
+) -> List[Dict[str, Any]]:
+ """Add ``{field}Label`` columns to each row for every FK field that has a
+ registered resolver.
+
+ ``modelClass`` — if provided, resolvers are auto-built from ``fk_model``
+ annotations on the Pydantic model (via ``_buildLabelResolversFromModel``).
+
+ ``labelResolvers`` — explicit resolver map that overrides auto-built ones.
+
+ ``extraResolvers`` — merged on top of auto-built / explicit resolvers. Use
+ for ad-hoc fields that are not FK-annotated on the model (e.g.
+ ``createdByUserId`` on billing transactions).
+
+ If a label cannot be resolved the ``{field}Label`` value is ``None``
+ (never the raw ID — that would reintroduce the silent-truncation bug).
+ """
+ resolvers: Dict[str, Callable] = {}
+
+ if modelClass is not None and labelResolvers is None:
+ resolvers = _buildLabelResolversFromModel(modelClass)
+ elif labelResolvers is not None:
+ resolvers = dict(labelResolvers)
+
+ if extraResolvers:
+ resolvers.update(extraResolvers)
+
+ if not resolvers or not rows:
+ return rows
+
+ for field, resolver in resolvers.items():
+ ids = list({str(r.get(field)) for r in rows if r.get(field)})
+ if not ids:
+ continue
+ try:
+ labelMap = resolver(ids)
+ except Exception as e:
+ logger.error("enrichRowsWithFkLabels: resolver for '%s' raised: %s", field, e)
+ labelMap = {}
+
+ labelKey = f"{field}Label"
+ for r in rows:
+ fkVal = r.get(field)
+ if fkVal:
+ r[labelKey] = labelMap.get(str(fkVal))
+ else:
+ r[labelKey] = None
+
+ return rows
+
+
# ---------------------------------------------------------------------------
# Cross-filter pagination parsing
# ---------------------------------------------------------------------------
@@ -210,7 +329,7 @@ def handleIdsMode(
# In-memory helpers (for enriched / non-SQL routes)
# ---------------------------------------------------------------------------
-def _applyFiltersAndSort(
+def applyFiltersAndSort(
items: List[Dict[str, Any]],
paginationParams: Optional[PaginationParams],
) -> List[Dict[str, Any]]:
@@ -364,12 +483,21 @@ def _extractDistinctValues(
items: List[Dict[str, Any]],
columnKey: str,
requestLang: Optional[str] = None,
-) -> List[str]:
- """Extract sorted distinct display values for a column from enriched items."""
+) -> List[Optional[str]]:
+ """Extract sorted distinct display values for a column from enriched items.
+
+ Includes ``None`` as the last entry when at least one row has a null/empty
+ value — this enables the "(Leer)" filter option in the frontend.
+ """
+ _MISSING = object()
values = set()
+ hasEmpty = False
for item in items:
- val = item.get(columnKey)
+ val = item.get(columnKey, _MISSING)
+ if val is _MISSING:
+ continue
if val is None or val == "":
+ hasEmpty = True
continue
if isinstance(val, bool):
values.add("true" if val else "false")
@@ -381,7 +509,10 @@ def _extractDistinctValues(
values.add(text)
else:
values.add(str(val))
- return sorted(values, key=lambda v: v.lower())
+ result: List[Optional[str]] = sorted(values, key=lambda v: v.lower())
+ if hasEmpty:
+ result.append(None)
+ return result
def handleFilterValuesInMemory(
@@ -396,7 +527,7 @@ def handleFilterValuesInMemory(
Returns JSONResponse to bypass FastAPI response_model validation.
"""
crossFilterParams = parseCrossFilterPagination(column, paginationJson)
- crossFiltered = _applyFiltersAndSort(items, crossFilterParams)
+ crossFiltered = applyFiltersAndSort(items, crossFilterParams)
return JSONResponse(content=_extractDistinctValues(crossFiltered, column, requestLang))
@@ -411,7 +542,7 @@ def handleIdsInMemory(
Returns JSONResponse to bypass FastAPI response_model validation.
"""
pagination = parsePaginationForIds(paginationJson)
- filtered = _applyFiltersAndSort(items, pagination)
+ filtered = applyFiltersAndSort(items, pagination)
ids = []
for item in filtered:
val = item.get(idField)
@@ -510,6 +641,7 @@ def getRecordsetPaginatedWithFkSort(
idOrder = {pid: idx for idx, pid in enumerate(pageIds)}
pageItems.sort(key=lambda r: idOrder.get(r.get(idField), 999999))
+ enrichRowsWithFkLabels(pageItems, modelClass)
totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
return {"items": pageItems, "totalItems": totalItems, "totalPages": totalPages}
diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py
index cadf128e..927d1bf2 100644
--- a/modules/routes/routeI18n.py
+++ b/modules/routes/routeI18n.py
@@ -26,7 +26,7 @@ from fastapi.responses import Response
from pydantic import BaseModel, Field
from modules.auth import getCurrentUser, requireSysAdmin, requirePlatformAdmin
-from modules.connectors.connectorDbPostgre import _get_cached_connector
+from modules.connectors.connectorDbPostgre import getCachedConnector
from modules.datamodels.datamodelAi import (
AiCallOptions,
AiCallRequest,
@@ -40,11 +40,11 @@ from modules.datamodels.datamodelRbac import Role
from modules.datamodels.datamodelFeatures import Feature
from modules.datamodels.datamodelNotification import NotificationType
from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface
-from modules.routes.routeNotifications import _createNotification
+from modules.routes.routeNotifications import createNotification
from modules.shared.configuration import APP_CONFIG
from modules.shared.i18nRegistry import (
_enforceSourcePlaceholders,
- _loadCache as _reloadI18nCache,
+ loadCache as _reloadI18nCache,
apiRouteContext,
)
from modules.shared.timeUtils import getUtcTimestamp
@@ -109,7 +109,7 @@ _ISO_PRIORITY_CODES: List[str] = ["de", "gsw", "en", "fr", "it"]
# ---------------------------------------------------------------------------
def _publicMgmtDb():
- return _get_cached_connector(
+ return getCachedConnector(
dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
dbDatabase="poweron_management",
dbUser=APP_CONFIG.get("DB_USER"),
@@ -729,7 +729,7 @@ async def _run_create_language_job_async(userId: str, code: str, label: str, cur
tmCount = await _translateTextMultilingualFields(db, code, label, billingCb)
- _createNotification(
+ createNotification(
userId,
NotificationType.SYSTEM,
title="Sprachset erstellt",
@@ -739,7 +739,7 @@ async def _run_create_language_job_async(userId: str, code: str, label: str, cur
logger.info("i18n create job done: code=%s, translated=%d/%d, tm_fields=%d", code, len(translated), len(xxEntries), tmCount)
except Exception as e:
logger.exception("create language job failed: %s", e)
- _createNotification(
+ createNotification(
userId,
NotificationType.SYSTEM,
title="Sprachset fehlgeschlagen",
@@ -790,7 +790,7 @@ async def create_language_set(
db.recordCreate(UiLanguageSet, rec)
background.add_task(_run_create_language_job, uid, code, resolvedLabel, currentUser, mandateId)
- _createNotification(
+ createNotification(
uid,
NotificationType.SYSTEM,
title="Sprachset wird erzeugt",
diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py
index 7e852b54..8138775f 100644
--- a/modules/routes/routeInvitations.py
+++ b/modules/routes/routeInvitations.py
@@ -21,7 +21,7 @@ from pydantic import BaseModel, Field, model_validator
from modules.auth import limiter, getRequestContext, RequestContext, getCurrentUser
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
-from modules.routes.routeHelpers import _applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory
+from modules.routes.routeHelpers import applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels
from modules.datamodels.datamodelInvitation import Invitation
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.shared.timeUtils import getUtcTimestamp
@@ -302,8 +302,8 @@ def create_invitation(
emailSubject = f"Einladung zu {mandateName}"
invite_desc = f"dem Mandanten «{mandateName}» beizutreten"
- from modules.routes.routeSecurityLocal import _buildAuthEmailHtml
- emailBody = _buildAuthEmailHtml(
+ from modules.routes.routeSecurityLocal import buildAuthEmailHtml
+ emailBody = buildAuthEmailHtml(
greeting=f"Hallo {display_name}",
bodyLines=[
f"Sie wurden eingeladen, {invite_desc}.",
@@ -496,20 +496,22 @@ def list_invitations(
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
- filtered = _applyFiltersAndSort(result, paginationParams)
+ filtered = applyFiltersAndSort(result, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
+ enriched = enrichRowsWithFkLabels(filtered[startIdx:endIdx], Invitation)
return {
- "items": filtered[startIdx:endIdx],
+ "items": enriched,
"pagination": PaginationMetadata(
currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalPages=totalPages,
sort=paginationParams.sort, filters=paginationParams.filters,
).model_dump(),
}
- return result
+ enriched = enrichRowsWithFkLabels(result, Invitation)
+ return {"items": enriched, "pagination": None}
except HTTPException:
raise
@@ -809,13 +811,13 @@ def accept_invitation(
if featureInstanceId:
existingAccess = rootInterface.getFeatureAccess(str(currentUser.id), featureInstanceId)
if existingAccess:
- # Update existing access with additional roles
+ # Update existing access with additional roles. addRoleToFeatureAccess
+ # is already idempotent (returns silently when the role is already
+ # assigned), so any exception here is a real error and must be
+ # surfaced — not swallowed.
featureAccessId = str(existingAccess.id)
for roleId in roleIds:
- try:
- rootInterface.addRoleToFeatureAccess(str(existingAccess.id), roleId)
- except Exception:
- pass # Role might already be assigned
+ rootInterface.addRoleToFeatureAccess(str(existingAccess.id), roleId)
message = "Roles updated for existing feature access"
else:
# Create feature access with instance-level roles
@@ -828,14 +830,13 @@ def accept_invitation(
featureAccessId = str(featureAccess.id)
message = "Successfully joined feature instance"
else:
- # Legacy: mandate-only invitation (no feature instance)
+ # Legacy: mandate-only invitation (no feature instance).
+ # addRoleToUserMandate is already idempotent — any exception here
+ # is a real error (e.g. DB / FK constraint) and must propagate.
existingMembership = rootInterface.getUserMandate(str(currentUser.id), mandateId)
if existingMembership:
for roleId in roleIds:
- try:
- rootInterface.addRoleToUserMandate(str(existingMembership.id), roleId)
- except Exception:
- pass
+ rootInterface.addRoleToUserMandate(str(existingMembership.id), roleId)
message = "Roles updated for existing membership"
else:
rootInterface.createUserMandate(
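The removed try/except blocks above rely on ``addRoleToFeatureAccess`` /
``addRoleToUserMandate`` being idempotent. A sketch of the semantics this
assumes (table name and implementation are assumptions, not shown in this
patch):

    def addRoleToFeatureAccess(self, featureAccessId: str, roleId: str) -> None:
        # Duplicate assignment is a silent no-op; only real failures
        # (FK violations, connection errors) raise.
        self.db.execute(
            'INSERT INTO "FeatureAccessRole" ("featureAccessId", "roleId") '
            'VALUES (%s, %s) ON CONFLICT DO NOTHING',
            (featureAccessId, roleId),
        )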
diff --git a/modules/routes/routeNotifications.py b/modules/routes/routeNotifications.py
index 41d7fe26..c1cacb17 100644
--- a/modules/routes/routeNotifications.py
+++ b/modules/routes/routeNotifications.py
@@ -52,7 +52,7 @@ class UnreadCountResponse(BaseModel):
# Helper Functions
# =============================================================================
-def _createNotification(
+def createNotification(
userId: str,
notificationType: NotificationType,
title: str,
@@ -103,7 +103,7 @@ def create_access_change_notification(
Failures are logged only so RBAC mutations still succeed.
"""
try:
- _createNotification(
+ createNotification(
userId=userId,
notificationType=NotificationType.SYSTEM,
title=title,
@@ -132,7 +132,7 @@ def createInvitationNotification(
msg = f"{inviterName} hat Sie zur Feature-Instanz '{featureInstanceName}' eingeladen."
else:
msg = f"{inviterName} hat Sie zu '{mandateName}' eingeladen."
- return _createNotification(
+ return createNotification(
userId=userId,
notificationType=NotificationType.INVITATION,
title="Neue Einladung",
diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py
index b6227cb0..807d5192 100644
--- a/modules/routes/routeSecurityLocal.py
+++ b/modules/routes/routeSecurityLocal.py
@@ -28,7 +28,7 @@ routeApiMsg = apiRouteContext("routeSecurityLocal")
logger = logging.getLogger(__name__)
-def _buildAuthEmailHtml(
+def buildAuthEmailHtml(
greeting: str,
bodyLines: list,
buttonText: str = None,
@@ -118,7 +118,7 @@ def _buildAuthEmailHtml(