From b6be8f391e363042882923524563448ff030f2e6 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Thu, 23 Apr 2026 23:09:38 +0200 Subject: [PATCH 1/7] fixes --- modules/demoConfigs/_baseDemoConfig.py | 15 +- modules/demoConfigs/investorDemo2026.py | 73 +++- modules/demoConfigs/pwgDemo2026.py | 72 +++- .../features/redmine/serviceRedmineSync.py | 93 +++++ .../features/trustee/routeFeatureTrustee.py | 128 +++++- modules/routes/routeI18n.py | 37 ++ tests/demo/test_demo_bootstrap.py | 9 +- tests/demo/test_demo_uc3_chatbot.py | 14 +- tests/demo/test_pwg_demo_bootstrap.py | 226 +++++++++++ tests/integration/rbac/test_rbac_database.py | 2 +- tests/test_phase123_basic.py | 314 -------------- tests/test_service_redmine_stats.py | 2 + .../serviceAgent/test_workflow_tools_crud.py | 383 ++++++++++++++++++ .../services/test_json_extraction_merging.py | 66 +-- .../workflows/test_automation2_graphUtils.py | 7 +- 15 files changed, 1052 insertions(+), 389 deletions(-) create mode 100644 tests/demo/test_pwg_demo_bootstrap.py delete mode 100644 tests/test_phase123_basic.py create mode 100644 tests/unit/serviceAgent/test_workflow_tools_crud.py diff --git a/modules/demoConfigs/_baseDemoConfig.py b/modules/demoConfigs/_baseDemoConfig.py index 4d9bdd59..d20d4315 100644 --- a/modules/demoConfigs/_baseDemoConfig.py +++ b/modules/demoConfigs/_baseDemoConfig.py @@ -4,11 +4,16 @@ Base class for demo configurations. Each demo config file in this folder extends _BaseDemoConfig and provides idempotent load() and remove() methods for setting up / tearing down a complete demo environment (mandates, users, features, test data, etc.). + +Subclasses MUST also declare ``credentials`` so the SysAdmin who triggers a +demo-load gets the initial username / password pair shown in the UI -- this +avoids the "where do I find the password?" anti-pattern of having to grep the +source code. """ import logging from abc import ABC, abstractmethod -from typing import Dict, Any +from typing import Any, Dict, List logger = logging.getLogger(__name__) @@ -20,6 +25,13 @@ class _BaseDemoConfig(ABC): label: str = "" description: str = "" + # Each entry describes one bootstrapped login that the demo creates. + # Shape: {"role": "Demo-Sachbearbeiter", "username": "pwg.demo", + # "email": "pwg.demo@poweron.swiss", "password": "pwg.demo.2026"} + # Surfaced via GET /api/admin/demo-config and inside the load() summary + # so the AdminDemoConfigPage can display it (no source-code grep needed). + credentials: List[Dict[str, str]] = [] + @abstractmethod def load(self, db) -> Dict[str, Any]: """Create all demo data (idempotent). Returns summary dict.""" @@ -35,4 +47,5 @@ class _BaseDemoConfig(ABC): "code": self.code, "label": self.label, "description": self.description, + "credentials": list(self.credentials or []), } diff --git a/modules/demoConfigs/investorDemo2026.py b/modules/demoConfigs/investorDemo2026.py index 058f9001..81956c6d 100644 --- a/modules/demoConfigs/investorDemo2026.py +++ b/modules/demoConfigs/investorDemo2026.py @@ -64,6 +64,14 @@ class InvestorDemo2026(_BaseDemoConfig): "Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, " "trustee with RMA, workspace, graph editor, and neutralization." ) + credentials = [ + { + "role": "SysAdmin Demo", + "username": _USER["username"], + "email": _USER["email"], + "password": _USER["password"], + } + ] # ------------------------------------------------------------------ # load @@ -101,6 +109,10 @@ class InvestorDemo2026(_BaseDemoConfig): logger.error(f"Demo load failed: {e}", exc_info=True) summary["errors"].append(str(e)) + # Surface initial credentials so the SysAdmin doesn't have to grep the + # source code -- consumed by AdminDemoConfigPage to render a copyable + # login box in the result banner. + summary["credentials"] = list(self.credentials) return summary # ------------------------------------------------------------------ @@ -268,10 +280,17 @@ class InvestorDemo2026(_BaseDemoConfig): logger.error(f"Failed to create feature '{instanceLabel}' ({code}) in {mandateLabel}: {e}") def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict): - """Grant the demo user admin access to every feature instance in the mandate.""" + """Grant the demo user admin access on EVERY feature instance of the + mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role + the user does not see any feature tile in the UI -- so this method + ALSO heals a half-broken state by re-copying the per-feature template + roles if they are missing (e.g. when the instance was created via an + older code path that skipped ``copyTemplateRoles``). + """ from modules.datamodels.datamodelFeatures import FeatureInstance from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole from modules.datamodels.datamodelRbac import Role + from modules.interfaces.interfaceFeatures import getFeatureInterface instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or [] @@ -297,16 +316,50 @@ class InvestorDemo2026(_BaseDemoConfig): "featureInstanceId": instId, "roleLabel": adminRoleLabel, }) - if adminRoles: - adminRoleId = adminRoles[0].get("id") - existingRole = db.getRecordset(FeatureAccessRole, recordFilter={ - "featureAccessId": featureAccessId, - "roleId": adminRoleId, + + # Self-heal: if the per-feature admin role does not exist on this + # instance the template roles were never copied -- copy them now. + if not adminRoles: + logger.warning( + "Feature instance %s (%s) is missing role '%s' -- " + "re-copying template roles", instId, featureCode, adminRoleLabel, + ) + try: + fi = getFeatureInterface(db) + fi._copyTemplateRoles(featureCode, mandateId, instId) + summary["created"].append( + f"Repaired template roles for {featureCode} in {mandateLabel}" + ) + except Exception as repairErr: + summary["errors"].append( + f"Could not repair template roles for {featureCode} " + f"in {mandateLabel}: {repairErr}" + ) + adminRoles = db.getRecordset(Role, recordFilter={ + "featureInstanceId": instId, + "roleLabel": adminRoleLabel, }) - if not existingRole: - far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId) - db.recordCreate(FeatureAccessRole, far) - logger.info(f"Assigned {adminRoleLabel} role in {mandateLabel}") + + if not adminRoles: + summary["errors"].append( + f"Admin role '{adminRoleLabel}' not found for feature " + f"instance {featureCode} in {mandateLabel} -- demo user " + f"will not see this feature." + ) + continue + + adminRoleId = adminRoles[0].get("id") + existingRole = db.getRecordset(FeatureAccessRole, recordFilter={ + "featureAccessId": featureAccessId, + "roleId": adminRoleId, + }) + if not existingRole: + far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId) + db.recordCreate(FeatureAccessRole, far) + summary["created"].append( + f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}" + ) + logger.info(f"Assigned {adminRoleLabel} role in {mandateLabel}") def _ensureTrusteeRmaConfig(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict): if not mandateId: diff --git a/modules/demoConfigs/pwgDemo2026.py b/modules/demoConfigs/pwgDemo2026.py index e3aeea51..d4661bcf 100644 --- a/modules/demoConfigs/pwgDemo2026.py +++ b/modules/demoConfigs/pwgDemo2026.py @@ -67,6 +67,14 @@ class PwgDemo2026(_BaseDemoConfig): "Graph-Editor mit dem Pilot-Workflow für Jahresmietzinsbestätigungen " "(als File importiert, active=false). Idempotent." ) + credentials = [ + { + "role": "Demo-Sachbearbeiter", + "username": _USER["username"], + "email": _USER["email"], + "password": _USER["password"], + } + ] # ------------------------------------------------------------------ # load @@ -98,6 +106,10 @@ class PwgDemo2026(_BaseDemoConfig): logger.error(f"PWG demo load failed: {e}", exc_info=True) summary["errors"].append(str(e)) + # Surface initial credentials so the SysAdmin doesn't have to grep the + # source code -- consumed by AdminDemoConfigPage to render a copyable + # login box in the result banner. + summary["credentials"] = list(self.credentials) return summary # ------------------------------------------------------------------ @@ -253,9 +265,17 @@ class PwgDemo2026(_BaseDemoConfig): summary["errors"].append(f"Feature '{instanceLabel}' in {mandateLabel}: {e}") def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict): + """Grant the demo user admin access on EVERY feature instance of the + mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role + the user does not see any feature tile in the UI -- so this method + ALSO heals a half-broken state by re-copying the per-feature template + roles if they are missing (e.g. when the instance was created via an + older code path that skipped ``copyTemplateRoles``). + """ from modules.datamodels.datamodelFeatures import FeatureInstance from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole from modules.datamodels.datamodelRbac import Role + from modules.interfaces.interfaceFeatures import getFeatureInterface instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or [] @@ -280,15 +300,51 @@ class PwgDemo2026(_BaseDemoConfig): "featureInstanceId": instId, "roleLabel": adminRoleLabel, }) - if adminRoles: - adminRoleId = adminRoles[0].get("id") - existingRole = db.getRecordset(FeatureAccessRole, recordFilter={ - "featureAccessId": featureAccessId, - "roleId": adminRoleId, + + # Self-heal: if the per-feature admin role does not exist on this + # instance the template roles were never copied -- copy them now. + if not adminRoles: + logger.warning( + "Feature instance %s (%s) is missing role '%s' -- " + "re-copying template roles", instId, featureCode, adminRoleLabel, + ) + try: + fi = getFeatureInterface(db) + fi._copyTemplateRoles(featureCode, mandateId, instId) + summary["created"].append( + f"Repaired template roles for {featureCode} in {mandateLabel}" + ) + except Exception as repairErr: + summary["errors"].append( + f"Could not repair template roles for {featureCode} " + f"in {mandateLabel}: {repairErr}" + ) + adminRoles = db.getRecordset(Role, recordFilter={ + "featureInstanceId": instId, + "roleLabel": adminRoleLabel, }) - if not existingRole: - far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId) - db.recordCreate(FeatureAccessRole, far) + + if not adminRoles: + # Hard fail surfaced to UI -- without the admin role the user + # would silently not see the instance. + summary["errors"].append( + f"Admin role '{adminRoleLabel}' not found for feature " + f"instance {featureCode} in {mandateLabel} -- demo user " + f"will not see this feature." + ) + continue + + adminRoleId = adminRoles[0].get("id") + existingRole = db.getRecordset(FeatureAccessRole, recordFilter={ + "featureAccessId": featureAccessId, + "roleId": adminRoleId, + }) + if not existingRole: + far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId) + db.recordCreate(FeatureAccessRole, far) + summary["created"].append( + f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}" + ) def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], userId: Optional[str], summary: Dict): if not mandateId or not userId: diff --git a/modules/features/redmine/serviceRedmineSync.py b/modules/features/redmine/serviceRedmineSync.py index 6d086ac0..2c631630 100644 --- a/modules/features/redmine/serviceRedmineSync.py +++ b/modules/features/redmine/serviceRedmineSync.py @@ -79,6 +79,16 @@ async def runSync( async with _lockFor(featureInstanceId): started = time.monotonic() + + # CRITICAL: ensure the schema cache (especially the per-status + # ``isClosed`` map) is populated BEFORE we iterate issues. Redmine's + # /issues.json endpoint only returns ``{id, name}`` for the status + # object -- the closed/open flag lives in /issue_statuses.json. If + # the cache is empty here, every freshly-synced ticket would land + # with ``isClosed=False`` and the Stats page would be useless. + await _ensureSchemaWarm(currentUser, mandateId, featureInstanceId) + cfg = iface.getConfig(featureInstanceId) # re-read to get warm cache + full = force or cfg.lastSyncAt is None updated_from_iso: Optional[str] = None if not full and cfg.lastSyncAt is not None: @@ -107,6 +117,15 @@ async def runSync( tickets_upserted += _upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch) relations_upserted += _replaceRelations(iface, featureInstanceId, issue, now_epoch) + # Self-healing pass: re-apply ``isClosed`` to every mirrored ticket + # using the now-warm schema cache. Fixes pre-existing rows that were + # synced before the cache was populated (cheap; mirror-local only). + flags_fixed = _rebuildIsClosedFromSchema(iface, featureInstanceId, now_epoch) + if flags_fixed: + logger.info( + f"runSync({featureInstanceId}): corrected isClosed on {flags_fixed} mirror rows" + ) + duration_ms = int((time.monotonic() - started) * 1000) iface.recordSyncSuccess( featureInstanceId, @@ -240,6 +259,80 @@ def _replaceRelations( return inserted +# --------------------------------------------------------------------------- +# Schema cache warm-up + post-sync isClosed correction +# --------------------------------------------------------------------------- + +async def _ensureSchemaWarm( + currentUser: User, + mandateId: Optional[str], + featureInstanceId: str, +) -> None: + """Make sure ``cfg.schemaCache['statuses']`` exists with the per-status + ``isClosed`` flag. Called at the start of every sync because Redmine's + ``/issues.json`` doesn't expose ``is_closed`` on the inline status + object, so we MUST resolve it via the schema. + """ + iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId) + cfg = iface.getConfig(featureInstanceId) + if cfg is None: + return + statuses = (cfg.schemaCache or {}).get("statuses") or [] + if statuses: + return + # Lazy import to avoid a circular dependency at module load. + from modules.features.redmine.serviceRedmine import getProjectMeta + try: + await getProjectMeta(currentUser, mandateId, featureInstanceId, forceRefresh=True) + except Exception as e: + logger.warning( + f"_ensureSchemaWarm({featureInstanceId}): could not warm schema cache: {e} " + "-- isClosed flags may be inaccurate until next successful schema fetch." + ) + + +def _rebuildIsClosedFromSchema(iface, featureInstanceId: str, nowEpoch: float) -> int: + """Walk the mirror once and fix ``isClosed`` (and ``closedOnTs``) for any + ticket whose stored value disagrees with the current schema cache. + + Returns the number of rows that were actually corrected. A no-op when + the schema cache has no statuses (logged once, then the caller can + decide whether to retry). + """ + cfg = iface.getConfig(featureInstanceId) + if cfg is None: + return 0 + statuses = (cfg.schemaCache or {}).get("statuses") or [] + if not statuses: + return 0 + closed_ids = {int(s.get("id")) for s in statuses if s.get("id") is not None and s.get("isClosed")} + rows = iface.listMirroredTickets(featureInstanceId) + corrections = 0 + for row in rows: + sid = row.get("statusId") + if sid is None: + continue + should_be_closed = int(sid) in closed_ids + if bool(row.get("isClosed")) == should_be_closed: + continue + # Only the closed/open flag (and the derived closedOnTs) are + # touched here -- everything else came from Redmine and stays. + update = { + "isClosed": bool(should_be_closed), + "closedOnTs": float(row.get("updatedOnTs")) if (should_be_closed and row.get("updatedOnTs") is not None) else None, + "syncedAt": nowEpoch, + } + try: + iface.upsertMirroredTicket(featureInstanceId, int(row.get("redmineId")), {**row, **update}) + corrections += 1 + except Exception as e: + logger.warning( + f"_rebuildIsClosedFromSchema({featureInstanceId}): could not fix ticket " + f"#{row.get('redmineId')}: {e}" + ) + return corrections + + # --------------------------------------------------------------------------- # Pure helpers # --------------------------------------------------------------------------- diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py index 3a6bfab0..d040c37d 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -1562,38 +1562,84 @@ async def refresh_chart_of_accounts( return {"message": f"Chart of accounts refreshed: {len(charts)} entries", "count": len(charts)} -@router.post("/{instanceId}/accounting/sync") -@limiter.limit("5/minute") -async def sync_positions_to_accounting( - request: Request, - instanceId: str = Path(..., description="Feature Instance ID"), - data: Dict[str, Any] = Body(...), - context: RequestContext = Depends(getRequestContext) -) -> Dict[str, Any]: - """Sync positions to the accounting system. Body: { positionIds: [...] }""" - mandateId = _validateInstanceAccess(instanceId, context) - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) +TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE = "trusteeAccountingPush" + + +async def _trusteeAccountingPushJobHandler(job: Dict[str, Any], progressCb) -> Dict[str, Any]: + """BackgroundJob handler: pushes a batch of positions to the external + accounting system. Runs in the worker without blocking the original HTTP + request, so the user can continue navigating while the sync runs. + + Reads inputs from `job["payload"]` (`positionIds`) and reports incremental + progress via `progressCb(percent, message)`. The job result has the same + shape that the legacy synchronous endpoint used to return. + """ + from modules.security.rootAccess import getRootUser from .accounting.accountingBridge import AccountingBridge + + instanceId = job["featureInstanceId"] + mandateId = job["mandateId"] + payload = job.get("payload") or {} + positionIds: List[str] = list(payload.get("positionIds") or []) + if not positionIds: + return {"total": 0, "success": 0, "skipped": 0, "errors": 0, "results": []} + + rootUser = getRootUser() + interface = getInterface(rootUser, mandateId=mandateId, featureInstanceId=instanceId) bridge = AccountingBridge(interface) - positionIds = data.get("positionIds", []) - if not positionIds: - raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required")) + results = [] + total = len(positionIds) + progressCb(2, f"Sync wird vorbereitet ({total} Position(en))...") + + # Resolve connector + plain config once to avoid decryption rate-limits + # (mirrors the optimisation in pushBatchToAccounting). We push positions + # one-by-one inside the job so we can emit incremental progress and so + # one bad row never aborts the rest. + from .accounting.accountingBridge import SyncResult + try: + connector, plainConfig, configRecord = await bridge._resolveConnectorAndConfig(instanceId) + except Exception as resolveErr: + logger.exception("Accounting push: failed to resolve connector/config") + progressCb(100, "Verbindungsaufbau fehlgeschlagen.") + raise resolveErr + + if not connector or not plainConfig: + results = [SyncResult(success=False, errorMessage="No active accounting configuration found") for _ in positionIds] + progressCb(100, "Keine aktive Buchhaltungs-Konfiguration gefunden.") + return { + "total": len(results), + "success": 0, + "skipped": 0, + "errors": len(results), + "results": [r.model_dump() for r in results], + } + + for index, positionId in enumerate(positionIds, start=1): + result = await bridge.pushPositionToAccounting( + instanceId, + positionId, + _resolvedConnector=connector, + _resolvedPlainConfig=plainConfig, + _resolvedConfigRecord=configRecord, + ) + results.append(result) + # Reserve 5..95% for the push loop, keep the tail for summary. + pct = 5 + int(90 * index / total) + progressCb(pct, f"Position {index}/{total} verarbeitet") - results = await bridge.pushBatchToAccounting(instanceId, positionIds) skipped = [r for r in results if not r.success and r.errorMessage and "already synced" in r.errorMessage] failed = [r for r in results if not r.success and r not in skipped] if skipped: - logger.info( - "Accounting sync: %s position(s) already synced, skipped", - len(skipped), - ) + logger.info("Accounting sync: %s position(s) already synced, skipped", len(skipped)) if failed: logger.warning( "Accounting sync had %s failure(s): %s", len(failed), "; ".join(r.errorMessage or "unknown" for r in failed[:3]), ) + + progressCb(100, "Sync abgeschlossen.") return { "total": len(results), "success": sum(1 for r in results if r.success), @@ -1603,6 +1649,50 @@ async def sync_positions_to_accounting( } +try: + from modules.serviceCenter.services.serviceBackgroundJobs import registerJobHandler as _registerPushJobHandler + _registerPushJobHandler(TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE, _trusteeAccountingPushJobHandler) +except Exception as _pushRegErr: + logger.warning("Failed to register trusteeAccountingPush job handler: %s", _pushRegErr) + + +@router.post("/{instanceId}/accounting/sync", status_code=status.HTTP_202_ACCEPTED) +@limiter.limit("5/minute") +async def sync_positions_to_accounting( + request: Request, + instanceId: str = Path(..., description="Feature Instance ID"), + data: Dict[str, Any] = Body(...), + context: RequestContext = Depends(getRequestContext) +) -> Dict[str, Any]: + """Submit a background job that pushes positions to the accounting system. + + Body: ``{ positionIds: [...] }`` + + Returns ``{ jobId, status: "pending" }`` immediately so the user is not + blocked while the (potentially long) external accounting calls run. + Clients poll ``GET /api/jobs/{jobId}`` until status is ``SUCCESS`` / + ``ERROR`` and then read the same ``{ total, success, skipped, errors, + results }`` payload from ``job.result`` that the legacy synchronous + endpoint returned. + """ + from modules.serviceCenter.services.serviceBackgroundJobs import startJob + + mandateId = _validateInstanceAccess(instanceId, context) + + positionIds = data.get("positionIds", []) + if not positionIds: + raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required")) + + jobId = await startJob( + TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE, + {"positionIds": list(positionIds)}, + mandateId=mandateId, + featureInstanceId=instanceId, + triggeredBy=context.user.id if context.user else None, + ) + return {"jobId": jobId, "status": "pending"} + + @router.post("/{instanceId}/accounting/sync/{positionId}") @limiter.limit("10/minute") async def sync_single_position_to_accounting( diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py index 91fbd9fe..cadf128e 100644 --- a/modules/routes/routeI18n.py +++ b/modules/routes/routeI18n.py @@ -98,6 +98,11 @@ _ISO_LABELS: Dict[str, str] = { "ur": "اردو", "uz": "Oʻzbek", "yo": "Yorùbá", "zu": "isiZulu", } +# Priority order for the language picker: most relevant first, rest sorted by label. +# Single source of truth -- frontend fetches via GET /api/i18n/iso-choices and must +# never duplicate this list. +_ISO_PRIORITY_CODES: List[str] = ["de", "gsw", "en", "fr", "it"] + # --------------------------------------------------------------------------- # DB helpers @@ -554,6 +559,38 @@ async def list_language_codes(): return sorted(out, key=lambda x: (not x.get("isDefault"), x["code"])) +@router.get("/iso-choices") +async def list_iso_choices(): + """Return the catalog of supported ISO 639-1/-3 language codes plus their + native labels. Single source of truth for any UI that lets the user pick a + language code (e.g. SysAdmin "add language set" dropdown). The frontend + must NOT keep its own copy of this list. + + Response: + { + "priorityCodes": ["de", "gsw", "en", "fr", "it"], + "choices": [{"value": "de", "label": "de — Deutsch"}, ...] + } + """ + choices = [ + {"value": code, "label": f"{code} — {label}"} + for code, label in _ISO_LABELS.items() + ] + + def _sortKey(item): + try: + prio = _ISO_PRIORITY_CODES.index(item["value"]) + return (0, prio) + except ValueError: + return (1, item["label"].lower()) + + choices.sort(key=_sortKey) + return { + "priorityCodes": list(_ISO_PRIORITY_CODES), + "choices": choices, + } + + @router.get("/sets/{code}") async def get_language_set(code: str): db = _publicMgmtDb() diff --git a/tests/demo/test_demo_bootstrap.py b/tests/demo/test_demo_bootstrap.py index 1d725442..09076e57 100644 --- a/tests/demo/test_demo_bootstrap.py +++ b/tests/demo/test_demo_bootstrap.py @@ -48,7 +48,7 @@ class TestDemoBootstrap: memberships = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mid}) assert len(memberships) >= 1, f"User not member of mandate {mandate.get('label')}" - @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"]) + @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "neutralization"]) def test_happylifeFeaturesExist(self, db, mandateHappylife, featureCode): mid = mandateHappylife.get("id") instances = _getFeatureInstances(db, mid, featureCode) @@ -66,6 +66,13 @@ class TestDemoBootstrap: instances = _getFeatureInstances(db, mid, "chatbot") assert len(instances) == 0, "Alpina Treuhand should not have chatbot" + def test_happylifeNoChatbot(self, db, mandateHappylife): + """HappyLife also should NOT have a chatbot instance — chatbot was + removed from the InvestorDemo on 2026-04-20 (see changelog).""" + mid = mandateHappylife.get("id") + instances = _getFeatureInstances(db, mid, "chatbot") + assert len(instances) == 0, "HappyLife should no longer have chatbot (removed 2026-04-20)" + class TestDemoBootstrapRma: diff --git a/tests/demo/test_demo_uc3_chatbot.py b/tests/demo/test_demo_uc3_chatbot.py index 89c8d7ba..0248bd5d 100644 --- a/tests/demo/test_demo_uc3_chatbot.py +++ b/tests/demo/test_demo_uc3_chatbot.py @@ -1,9 +1,11 @@ """ T-UC3: Knowledge Chatbot. -Verifies that the chatbot feature instance exists in HappyLife AG -and that knowledge-base documents are available for upload. -Note: The actual RAG demo runs via workspace, not the chatbot's own index. +The chatbot feature instance was removed from the InvestorDemo on +2026-04-20 (see changelog) — neither HappyLife nor Alpina bootstrap a +chatbot today; the actual RAG demo runs via workspace. We still verify +the knowledge-base demo files are present and that the bootstrap does +NOT (re)create chatbot instances in either mandate. """ import pytest @@ -13,11 +15,11 @@ from tests.demo.conftest import _getFeatureInstances class TestChatbotSetup: - def test_chatbotInstanceHappylife(self, db, mandateHappylife): - """HappyLife must have a chatbot instance.""" + def test_chatbotNotInHappylife(self, db, mandateHappylife): + """HappyLife should NOT have a chatbot instance (removed 2026-04-20).""" mid = mandateHappylife.get("id") instances = _getFeatureInstances(db, mid, "chatbot") - assert len(instances) >= 1, "No chatbot instance in HappyLife" + assert len(instances) == 0, "HappyLife should no longer bootstrap a chatbot instance" def test_chatbotNotInAlpina(self, db, mandateAlpina): """Alpina should NOT have a chatbot instance.""" diff --git a/tests/demo/test_pwg_demo_bootstrap.py b/tests/demo/test_pwg_demo_bootstrap.py new file mode 100644 index 00000000..0613cafa --- /dev/null +++ b/tests/demo/test_pwg_demo_bootstrap.py @@ -0,0 +1,226 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""T6 — PWG-Pilot demo bootstrap & idempotency tests. + +Covers AC 11 + AC 12 of the PWG-Pilot plan: + - ``PwgDemo2026.load()`` is idempotent (twice → no errors). + - All expected objects exist after load (mandate, demo user, + 4 feature instances, trustee seed data, imported pilot workflow with + ``active=False``). + - ``remove()`` cleans up cleanly and a subsequent ``load()`` rebuilds + the demo without error (idempotency over the full lifecycle). + +Mirrors the structure of ``tests/demo/test_demo_bootstrap.py`` and reuses +its session-scoped ``db`` fixture from ``tests/demo/conftest.py``. + +Marked ``expensive + live`` because they hit the real Postgres databases +(``poweron_app``, ``poweron_trustee``, ``poweron_graphicaleditor``); run +them explicitly with:: + + pytest -m "expensive or live" tests/demo/test_pwg_demo_bootstrap.py +""" + +import pytest + +from modules.datamodels.datamodelFeatures import FeatureInstance +from modules.datamodels.datamodelMembership import UserMandate +from modules.datamodels.datamodelUam import Mandate, UserInDB + +from tests.demo.conftest import _getFeatureInstances + + +pytestmark = [pytest.mark.expensive, pytest.mark.live] + + +# --------------------------------------------------------------------------- +# Fixtures (function-scoped so they always reflect current DB state) +# --------------------------------------------------------------------------- + +@pytest.fixture(scope="session") +def pwgDemoConfig(): + """Auto-discovered ``PwgDemo2026`` instance.""" + from modules.demoConfigs import _getDemoConfigByCode + cfg = _getDemoConfigByCode("pwg-demo-2026") + assert cfg is not None, ( + "Demo config 'pwg-demo-2026' not found — check modules/demoConfigs/pwgDemo2026.py" + ) + return cfg + + +@pytest.fixture +def mandatePwg(db): + records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"}) + assert records, "Mandate 'stiftung-pwg' not found — run pwgDemoConfig.load() first" + return records[0] + + +@pytest.fixture +def pwgUser(db): + records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"}) + assert records, "User 'pwg.demo' not found — run pwgDemoConfig.load() first" + return records[0] + + +# --------------------------------------------------------------------------- +# Bootstrap idempotency +# --------------------------------------------------------------------------- + +class TestPwgDemoBootstrap: + + def test_loadIsIdempotent(self, db, pwgDemoConfig): + """Loading the PWG demo twice in a row must not raise errors.""" + s1 = pwgDemoConfig.load(db) + assert len(s1.get("errors", [])) == 0, f"First load errors: {s1['errors']}" + s2 = pwgDemoConfig.load(db) + assert len(s2.get("errors", [])) == 0, f"Second load errors: {s2['errors']}" + + def test_credentialsAreSurfacedFromLoadSummary(self, db, pwgDemoConfig): + s = pwgDemoConfig.load(db) + creds = s.get("credentials") or [] + assert any(c.get("username") == "pwg.demo" for c in creds), ( + "PWG demo must surface 'pwg.demo' credentials so the SysAdmin " + "doesn't have to grep source code for the password." + ) + + def test_mandateStiftungPwgExists(self, db): + records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"}) + assert len(records) == 1 + assert records[0].get("label") == "Stiftung PWG" + assert records[0].get("enabled") is True + + def test_pwgDemoUserExists(self, db): + records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"}) + assert len(records) == 1 + user = records[0] + assert user.get("email") == "pwg.demo@poweron.swiss" + assert user.get("isSysAdmin") is True + assert user.get("language") == "de" + + def test_pwgUserMembership(self, db, pwgUser, mandatePwg): + memberships = db.getRecordset(UserMandate, recordFilter={ + "userId": pwgUser.get("id"), + "mandateId": mandatePwg.get("id"), + }) + assert len(memberships) >= 1, "PWG demo user not a member of Stiftung PWG" + + @pytest.mark.parametrize( + "featureCode", + ["workspace", "trustee", "graphicalEditor", "neutralization"], + ) + def test_pwgFeaturesExist(self, db, mandatePwg, featureCode): + instances = _getFeatureInstances(db, mandatePwg.get("id"), featureCode) + assert len(instances) >= 1, f"Feature '{featureCode}' missing in Stiftung PWG" + + def test_pwgFourFeatureInstances(self, db, mandatePwg): + instances = db.getRecordset(FeatureInstance, recordFilter={ + "mandateId": mandatePwg.get("id"), + }) or [] + codes = sorted({i.get("featureCode") for i in instances}) + assert codes == ["graphicalEditor", "neutralization", "trustee", "workspace"], ( + f"Expected exactly 4 feature instances, got {codes}" + ) + + +# --------------------------------------------------------------------------- +# Trustee seed data — 5 fictitious tenants × 12 monthly bookings each +# --------------------------------------------------------------------------- + +class TestPwgTrusteeSeed: + + def test_trusteeRentAccountExists(self, db, mandatePwg): + from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataAccount + instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee") + assert instances, "No trustee instance for PWG" + instId = instances[0].get("id") + from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb + trusteeDb = _openTrusteeDb() + accounts = trusteeDb.getRecordset(TrusteeDataAccount, recordFilter={ + "featureInstanceId": instId, + "accountNumber": "6000", + }) or [] + assert len(accounts) == 1, f"Expected exactly 1 rent account 6000, got {len(accounts)}" + assert accounts[0].get("isActive") is True + + def test_trusteeFiveTenants(self, db, mandatePwg): + from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataContact + instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee") + instId = instances[0].get("id") + from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb + trusteeDb = _openTrusteeDb() + contacts = trusteeDb.getRecordset(TrusteeDataContact, recordFilter={ + "featureInstanceId": instId, + }) or [] + # Some installations may already have other trustee contacts, but the + # 5 PWG seed tenants must be present. + names = {c.get("name") for c in contacts} + for expected in ( + "Anna Müller", "Beat Schneider", "Carla Weber", + "Daniel Frey", "Eva Lang", + ): + assert expected in names, f"PWG seed tenant '{expected}' missing" + + def test_trusteeMonthlyBookingsForTenant(self, db, mandatePwg): + """Every tenant gets 12 monthly journal entries.""" + from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataJournalEntry + instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee") + instId = instances[0].get("id") + from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb + trusteeDb = _openTrusteeDb() + entries = trusteeDb.getRecordset(TrusteeDataJournalEntry, recordFilter={ + "featureInstanceId": instId, + }) or [] + # 5 tenants × 12 months = 60; >= so reload doesn't false-fail. + pwgEntries = [e for e in entries if (e.get("reference") or "").startswith("PWG-")] + assert len(pwgEntries) >= 60, ( + f"Expected >=60 PWG journal entries (5 tenants × 12 months), got {len(pwgEntries)}" + ) + + +# --------------------------------------------------------------------------- +# Pilot workflow — imported envelope, must be active=False +# --------------------------------------------------------------------------- + +class TestPwgPilotWorkflow: + + def test_pilotWorkflowImported(self, db, mandatePwg): + from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow + from modules.demoConfigs.pwgDemo2026 import _openGraphicalEditorDb + instances = _getFeatureInstances(db, mandatePwg.get("id"), "graphicalEditor") + assert instances, "No graphicalEditor instance for PWG" + instId = instances[0].get("id") + geDb = _openGraphicalEditorDb() + wfs = geDb.getRecordset(AutoWorkflow, recordFilter={ + "mandateId": mandatePwg.get("id"), + "featureInstanceId": instId, + "label": "PWG Pilot: Jahresmietzinsbestätigung", + }) or [] + assert len(wfs) == 1, f"Expected exactly 1 PWG pilot workflow, got {len(wfs)}" + wf = wfs[0] + # AC 10: imports must be inactive by default + assert wf.get("active") is False, "PWG pilot workflow must be imported with active=false" + graph = wf.get("graph") or {} + assert (graph.get("nodes") or []), "PWG pilot workflow has no nodes" + + +# --------------------------------------------------------------------------- +# Lifecycle: remove + reload (mirrors investor demo TestDemoRemoveAndReload) +# --------------------------------------------------------------------------- + +class TestPwgRemoveAndReload: + + def test_removeAndReload(self, db, pwgDemoConfig): + """Remove the PWG demo, verify it is gone, then reload it.""" + rs = pwgDemoConfig.remove(db) + assert len(rs.get("errors", [])) == 0, f"Remove errors: {rs['errors']}" + + mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"}) + assert len(mandates) == 0, "Stiftung PWG mandate should be gone after remove" + + users = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"}) + assert len(users) == 0, "pwg.demo user should be gone after remove" + + ls = pwgDemoConfig.load(db) + assert len(ls.get("errors", [])) == 0, f"Reload errors: {ls['errors']}" + + mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"}) + assert len(mandates) == 1, "Stiftung PWG must exist after reload" diff --git a/tests/integration/rbac/test_rbac_database.py b/tests/integration/rbac/test_rbac_database.py index 72eb1b26..208ed6dd 100644 --- a/tests/integration/rbac/test_rbac_database.py +++ b/tests/integration/rbac/test_rbac_database.py @@ -166,7 +166,7 @@ class TestRbacDatabaseFiltering: try: mandate = Mandate( id=testMandateId, - name="RBAC test mandate", + name="rbac-test-mandate-uc", label="RBAC test", ) mandatePayload = mandate.model_dump() diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py deleted file mode 100644 index 59a3234d..00000000 --- a/tests/test_phase123_basic.py +++ /dev/null @@ -1,314 +0,0 @@ -""" -Basic verification tests for Phase 1-3 implementation. -Run with: python tests/test_phase123_basic.py -Requires: gateway running on localhost:8000 -""" -import sys -import os -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -print("=" * 60) -print("PHASE 1-3 BASIC VERIFICATION") -print("=" * 60) - -errors = [] -passes = [] - -def _check(label, condition, detail=""): - if condition: - passes.append(label) - print(f" [PASS] {label}") - else: - errors.append(f"{label}: {detail}") - print(f" [FAIL] {label} — {detail}") - -# ── Phase 1: Data Models ────────────────────────────────────────────────────── -print("\n--- Phase 1: Data Models ---") - -try: - from modules.datamodels.datamodelUam import Mandate - m = Mandate(name="test", label="test") - _check("Mandate has isSystem field", hasattr(m, "isSystem")) - _check("Mandate isSystem default False", m.isSystem is False) - _check("Mandate no mandateType field", not hasattr(m, "mandateType")) -except Exception as e: - errors.append(f"Phase 1 DataModel: {e}") - print(f" [FAIL] Phase 1 DataModel import: {e}") - -try: - from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS, SubscriptionPlan - _check("PENDING status exists", hasattr(SubscriptionStatusEnum, "PENDING")) - _check("BUILTIN_PLANS has TRIAL_14D", "TRIAL_14D" in BUILTIN_PLANS) - trial = BUILTIN_PLANS["TRIAL_14D"] - _check("TRIAL_14D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB")) - _check("TRIAL_14D maxDataVolumeMB=1024", trial.maxDataVolumeMB == 1024) - _check("TRIAL_14D has includedModules", hasattr(trial, "includedModules")) - _check("TRIAL_14D includedModules=2", trial.includedModules == 2) - _check("TRIAL_14D trialDays=14", trial.trialDays == 14) -except Exception as e: - errors.append(f"Phase 1 Subscription: {e}") - print(f" [FAIL] Phase 1 Subscription: {e}") - -# ── Phase 2: Scope Fields ───────────────────────────────────────────────────── -print("\n--- Phase 2: Scope Fields on Models ---") - -try: - from modules.datamodels.datamodelFiles import FileItem - fi = FileItem(fileName="test.txt", mimeType="text/plain", fileHash="abc", fileSize=100) - _check("FileItem has scope field", hasattr(fi, "scope")) - _check("FileItem scope default=personal", fi.scope == "personal") - _check("FileItem has neutralize field", hasattr(fi, "neutralize")) - _check("FileItem neutralize default=False", fi.neutralize == False) -except Exception as e: - errors.append(f"Phase 2 FileItem: {e}") - print(f" [FAIL] Phase 2 FileItem: {e}") - -try: - from modules.datamodels.datamodelDataSource import DataSource - ds = DataSource(connectionId="c1", sourceType="sharepoint", path="/test", label="Test") - _check("DataSource has scope field", hasattr(ds, "scope")) - _check("DataSource scope default=personal", ds.scope == "personal") - _check("DataSource has neutralize field", hasattr(ds, "neutralize")) - _check("DataSource neutralize default=False", ds.neutralize == False) -except Exception as e: - errors.append(f"Phase 2 DataSource: {e}") - print(f" [FAIL] Phase 2 DataSource: {e}") - -try: - from modules.datamodels.datamodelKnowledge import FileContentIndex - fci = FileContentIndex(userId="u1", fileName="test.txt", mimeType="text/plain") - _check("FileContentIndex has scope field", hasattr(fci, "scope")) - _check("FileContentIndex scope default=personal", fci.scope == "personal") - _check("FileContentIndex has neutralizationStatus", hasattr(fci, "neutralizationStatus")) - _check("FileContentIndex neutralizationStatus default=None", fci.neutralizationStatus is None) -except Exception as e: - errors.append(f"Phase 2 FileContentIndex: {e}") - print(f" [FAIL] Phase 2 FileContentIndex: {e}") - -# ── Phase 2: RAG Scope Filtering ────────────────────────────────────────────── -print("\n--- Phase 2: RAG Scope Logic ---") - -try: - from modules.interfaces.interfaceDbKnowledge import KnowledgeObjects - _check("KnowledgeObjects has _getScopedFileIds", hasattr(KnowledgeObjects, "_getScopedFileIds")) - _check("KnowledgeObjects has _buildScopeFilter", hasattr(KnowledgeObjects, "_buildScopeFilter")) - - import inspect - sig = inspect.signature(KnowledgeObjects._getScopedFileIds) - params = list(sig.parameters.keys()) - _check("_getScopedFileIds has isSysAdmin param", "isSysAdmin" in params) - - sig2 = inspect.signature(KnowledgeObjects.semanticSearch) - params2 = list(sig2.parameters.keys()) - _check("semanticSearch has scope param", "scope" in params2) - _check("semanticSearch has isSysAdmin param", "isSysAdmin" in params2) -except Exception as e: - errors.append(f"Phase 2 RAG: {e}") - print(f" [FAIL] Phase 2 RAG: {e}") - -# ── Phase 3: Neutralization Methods ─────────────────────────────────────────── -print("\n--- Phase 3: Neutralization Integration ---") - -try: - from modules.workflows.workflowManager import WorkflowManager - _check("WorkflowManager has _neutralizePromptIfRequired", hasattr(WorkflowManager, "_neutralizePromptIfRequired")) - _check("WorkflowManager has _rehydrateResponseIfNeeded", hasattr(WorkflowManager, "_rehydrateResponseIfNeeded")) - - import inspect - sig_n = inspect.signature(WorkflowManager._neutralizePromptIfRequired) - _check("_neutralizePromptIfRequired is async", inspect.iscoroutinefunction(WorkflowManager._neutralizePromptIfRequired)) - - sig_r = inspect.signature(WorkflowManager._rehydrateResponseIfNeeded) - _check("_rehydrateResponseIfNeeded is async", inspect.iscoroutinefunction(WorkflowManager._rehydrateResponseIfNeeded)) -except Exception as e: - errors.append(f"Phase 3 WorkflowManager: {e}") - print(f" [FAIL] Phase 3 WorkflowManager: {e}") - -# ── Phase 3: Fail-Safe Logic ────────────────────────────────────────────────── -print("\n--- Phase 3: Fail-Safe Logic ---") - -try: - import ast - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "workflows", "methods", "methodContext", "actions", "neutralizeData.py"), "r") as f: - source = f.read() - _check("neutralizeData.py has 'SKIPPING' fail-safe", "SKIPPING" in source) - _check("neutralizeData.py has 'do NOT pass original' comment", "do NOT pass original" in source.lower() or "not passing original" in source.lower()) - _check("neutralizeData.py uses continue for skip", "continue" in source) -except Exception as e: - errors.append(f"Phase 3 Fail-Safe: {e}") - print(f" [FAIL] Phase 3 Fail-Safe: {e}") - -# ── Phase 2: Route Endpoints ────────────────────────────────────────────────── -print("\n--- Phase 2: API Endpoints ---") - -try: - import ast - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "routes", "routeDataFiles.py"), "r") as f: - source = f.read() - _check("routeDataFiles has PATCH scope endpoint", "updateFileScope" in source) - _check("routeDataFiles has PATCH neutralize endpoint", "updateFileNeutralize" in source) - _check("routeDataFiles checks global sysAdmin", "isSysAdmin" in source) -except Exception as e: - errors.append(f"Phase 2 Routes: {e}") - print(f" [FAIL] Phase 2 Routes: {e}") - -# ── Phase 1: Store Endpoints ────────────────────────────────────────────────── -print("\n--- Phase 1: Store Endpoints ---") - -try: - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "routes", "routeStore.py"), "r") as f: - source = f.read() - _check("routeStore has listUserMandates", "listUserMandates" in source or "list_user_mandates" in source) - _check("routeStore has getSubscriptionInfo", "getSubscriptionInfo" in source or "get_subscription_info" in source) - _check("routeStore has orphan control", "orphan" in source.lower() or "last" in source.lower()) -except Exception as e: - errors.append(f"Phase 1 Store: {e}") - print(f" [FAIL] Phase 1 Store: {e}") - -# ── Phase 1: Provisioning ───────────────────────────────────────────────────── -print("\n--- Phase 1: Provisioning ---") - -try: - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "interfaces", "interfaceDbApp.py"), "r") as f: - source = f.read() - _check("interfaceDbApp has _provisionMandateForUser", "_provisionMandateForUser" in source) - _check("interfaceDbApp has _activatePendingSubscriptions", "_activatePendingSubscriptions" in source) - _check("interfaceDbApp has deleteMandate cascade", "deleteMandate" in source and "cascade" in source.lower()) -except Exception as e: - errors.append(f"Phase 1 Provisioning: {e}") - print(f" [FAIL] Phase 1 Provisioning: {e}") - -# ── Phase 1: Registration Routes ────────────────────────────────────────────── -print("\n--- Phase 1: Registration ---") - -try: - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "routes", "routeSecurityLocal.py"), "r") as f: - source = f.read() - _check("routeSecurityLocal has registrationType", "registrationType" in source) - _check("routeSecurityLocal has companyName", "companyName" in source) - _check("routeSecurityLocal has onboarding endpoint", "onboarding" in source) -except Exception as e: - errors.append(f"Phase 1 Registration: {e}") - print(f" [FAIL] Phase 1 Registration: {e}") - -# ── Fix 1: OnboardingWizard Integration ──────────────────────────────────────── -print("\n--- Fix 1: OnboardingWizard Integration ---") - -try: - loginPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "..", "frontend_nyla", "src", "pages", "Login.tsx") - with open(loginPath, "r", encoding="utf-8") as f: - source = f.read() - _check("Login.tsx imports OnboardingWizard", "OnboardingWizard" in source) - _check("Login.tsx has showOnboardingWizard state", "showOnboardingWizard" in source) - _check("Login.tsx checks isNewUser", "isNewUser" in source) -except Exception as e: - errors.append(f"Fix 1: {e}") - print(f" [FAIL] Fix 1: {e}") - -# ── Fix 2: CommCoach UDB Integration ────────────────────────────────────────── -print("\n--- Fix 2: CommCoach UDB Integration ---") - -try: - dossierPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "..", "frontend_nyla", "src", "pages", "views", "commcoach", "CommcoachDossierView.tsx") - with open(dossierPath, "r", encoding="utf-8") as f: - source = f.read() - _check("CommCoach imports UnifiedDataBar", "UnifiedDataBar" in source) - _check("CommCoach imports FilesTab", "FilesTab" in source) - _check("CommCoach no longer imports getDocumentsApi", "getDocumentsApi" not in source) - _check("CommCoach has UDB sidebar", "udbSidebar" in source or "UnifiedDataBar" in source) -except Exception as e: - errors.append(f"Fix 2: {e}") - print(f" [FAIL] Fix 2: {e}") - -# ── Fix 3: Neutralization Backend Endpoints ─────────────────────────────────── -print("\n--- Fix 3: Neutralization Backend Endpoints ---") - -try: - routePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "features", "neutralization", "routeFeatureNeutralizer.py") - with open(routePath, "r") as f: - source = f.read() - _check("Neutralization has deleteAttribute endpoint", "deleteAttribute" in source or "delete_attribute" in source) - _check("Neutralization has retrigger endpoint", "retrigger" in source) - _check("Neutralization has single attribute delete", "single" in source or "attributeId" in source) -except Exception as e: - errors.append(f"Fix 3: {e}") - print(f" [FAIL] Fix 3: {e}") - -# ── Fix 4: Central AI Neutralization ────────────────────────────────────────── -print("\n--- Fix 4: Central AI Neutralization ---") - -try: - aiPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "serviceCenter", "services", "serviceAi", "mainServiceAi.py") - with open(aiPath, "r") as f: - source = f.read() - _check("AiService has _shouldNeutralize", "_shouldNeutralize" in source) - _check("AiService has _neutralizeRequest", "_neutralizeRequest" in source) - _check("AiService has _rehydrateResponse", "_rehydrateResponse" in source) - _check("callAi uses neutralization", "_shouldNeutralize" in source and "_neutralizeRequest" in source) -except Exception as e: - errors.append(f"Fix 4: {e}") - print(f" [FAIL] Fix 4: {e}") - -# ── Fix 5: Voice Settings User Level ────────────────────────────────────────── -print("\n--- Fix 5: Voice Settings User Level ---") - -try: - from modules.datamodels.datamodelUam import UserVoicePreferences - uvp = UserVoicePreferences(userId="u1") - _check("UserVoicePreferences model exists", True) - _check("UserVoicePreferences has sttLanguage", hasattr(uvp, "sttLanguage")) - _check("UserVoicePreferences default sttLanguage=de-DE", uvp.sttLanguage == "de-DE") - _check("UserVoicePreferences has ttsVoice", hasattr(uvp, "ttsVoice")) -except Exception as e: - errors.append(f"Fix 5: {e}") - print(f" [FAIL] Fix 5: {e}") - -try: - voiceUserPath = os.path.join( - os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "routes", "routeVoiceUser.py", - ) - with open(voiceUserPath, "r") as f: - source = f.read() - _check("Voice preferences GET endpoint", '"/preferences"' in source and "getVoicePreferences" in source) - _check("Voice preferences PUT endpoint", "updateVoicePreferences" in source) -except Exception as e: - errors.append(f"Fix 5 Routes: {e}") - print(f" [FAIL] Fix 5 Routes: {e}") - -# ── Fix 6: RAG mandate-wide scope ───────────────────────────────────────────── -print("\n--- Fix 6: RAG mandate-wide scope ---") - -try: - knowledgePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "serviceCenter", "services", "serviceKnowledge", "mainServiceKnowledge.py") - with open(knowledgePath, "r") as f: - source = f.read() - _check("buildAgentContext passes mandateId to semanticSearch", "mandateId=mandateId" in source) - _check("buildAgentContext has isSysAdmin param", "isSysAdmin" in source) -except Exception as e: - errors.append(f"Fix 6: {e}") - print(f" [FAIL] Fix 6: {e}") - -# ── Summary ─────────────────────────────────────────────────────────────────── -print("\n" + "=" * 60) -print(f"RESULTS: {len(passes)} passed, {len(errors)} failed") -print("=" * 60) - -if errors: - print("\nFAILURES:") - for e in errors: - print(f" - {e}") - sys.exit(1) -else: - print("\nALL CHECKS PASSED!") - sys.exit(0) diff --git a/tests/test_service_redmine_stats.py b/tests/test_service_redmine_stats.py index 310c15c7..aecd2caf 100644 --- a/tests/test_service_redmine_stats.py +++ b/tests/test_service_redmine_stats.py @@ -112,6 +112,8 @@ class TestAggregateEndToEnd: dateTo="2026-04-30", bucket="month", trackerIdsFilter=[], + categoryIdsFilter=[], + statusFilter="", instanceId="test-instance", ) assert dto.instanceId == "test-instance" diff --git a/tests/unit/serviceAgent/test_workflow_tools_crud.py b/tests/unit/serviceAgent/test_workflow_tools_crud.py new file mode 100644 index 00000000..9ebe1df6 --- /dev/null +++ b/tests/unit/serviceAgent/test_workflow_tools_crud.py @@ -0,0 +1,383 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""T3 — Unit tests for the workflow-CRUD agent tools. + +Covers AC 5 + AC 6 of the PWG-Pilot plan: + - createWorkflow happy-path returns a workflowId. + - createWorkflow rejects missing label / instanceId. + - deleteWorkflow without ``confirm=true`` is a NO-OP and returns an error. + - deleteWorkflow with ``confirm=true`` deletes and returns success. + - updateWorkflowMetadata patches only the supplied fields. + - createWorkflowFromFile / exportWorkflowToFile happy-path round-trip. + +The tools call into a feature-instance interface; we replace +``workflowTools._getInterface`` with a fake that captures interactions +without touching any database. +""" + +import asyncio +import json +import uuid +from typing import Any, Dict, Optional + +import pytest + +from modules.serviceCenter.services.serviceAgent import workflowTools +from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +class _FakeInterface: + """In-memory stand-in for ``GraphicalEditorObjects``. + + Stores workflows by id and records every method call in ``self.calls`` + so tests can assert on the parameters the tool layer forwarded. + """ + + def __init__(self, mandateId: str = "mand-1", featureInstanceId: str = "inst-1"): + self.mandateId = mandateId + self.featureInstanceId = featureInstanceId + self.workflows: Dict[str, Dict[str, Any]] = {} + self.calls: list = [] + + def createWorkflow(self, data: Dict[str, Any]) -> Dict[str, Any]: + self.calls.append(("createWorkflow", data)) + wfId = data.get("id") or str(uuid.uuid4()) + record = dict(data) + record["id"] = wfId + record["mandateId"] = self.mandateId + record["featureInstanceId"] = self.featureInstanceId + record.setdefault("active", False) + self.workflows[wfId] = record + return record + + def updateWorkflow(self, workflowId: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]: + self.calls.append(("updateWorkflow", workflowId, data)) + existing = self.workflows.get(workflowId) + if not existing: + return None + existing.update(data) + return existing + + def deleteWorkflow(self, workflowId: str) -> bool: + self.calls.append(("deleteWorkflow", workflowId)) + return self.workflows.pop(workflowId, None) is not None + + def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]: + return self.workflows.get(workflowId) + + def importWorkflowFromDict( + self, + envelope: Dict[str, Any], + existingWorkflowId: Optional[str] = None, + ) -> Dict[str, Any]: + self.calls.append(("importWorkflowFromDict", envelope, existingWorkflowId)) + data = { + "label": envelope.get("label", "Imported"), + "description": envelope.get("description", ""), + "tags": envelope.get("tags", []), + "graph": envelope.get("graph", {"nodes": [], "connections": []}), + "invocations": envelope.get("invocations", []), + "active": False, + } + if existingWorkflowId: + updated = self.updateWorkflow(existingWorkflowId, data) or {} + return {"workflow": updated, "warnings": [], "created": False} + created = self.createWorkflow(data) + return {"workflow": created, "warnings": [], "created": True} + + def exportWorkflowToDict(self, workflowId: str) -> Optional[Dict[str, Any]]: + wf = self.workflows.get(workflowId) + if not wf: + return None + return { + "$schemaVersion": "1.0", + "$kind": "poweron.workflow", + "label": wf.get("label"), + "description": wf.get("description", ""), + "tags": wf.get("tags", []), + "graph": wf.get("graph") or {"nodes": [], "connections": []}, + "invocations": wf.get("invocations") or [], + } + + +@pytest.fixture +def fakeInterface(monkeypatch): + """Replace ``_getInterface`` with a fixture-scoped fake.""" + fake = _FakeInterface() + monkeypatch.setattr(workflowTools, "_getInterface", lambda _ctx, _iid: fake) + return fake + + +def _ctx(workflowId: str = "wf-1", instanceId: str = "inst-1") -> Dict[str, Any]: + """Standard agent-tool context dict.""" + return { + "workflowId": workflowId, + "featureInstanceId": instanceId, + "userId": "user-1", + "mandateId": "mand-1", + } + + +def _runTool(handler, params: Dict[str, Any], context: Dict[str, Any]) -> ToolResult: + return asyncio.run(handler(params, context)) + + +def _payload(result: ToolResult) -> Dict[str, Any]: + """Decode the tool's data string back into a dict for easy asserts.""" + assert isinstance(result.data, str), "ToolResult.data must be a string per registry contract" + return json.loads(result.data) + + +# --------------------------------------------------------------------------- +# createWorkflow — AC 5 +# --------------------------------------------------------------------------- + +class TestCreateWorkflow: + def test_happyPathReturnsWorkflowId(self, fakeInterface): + result = _runTool(workflowTools._createWorkflow, {"label": "Smoke-Test"}, _ctx()) + assert result.success, result.error + payload = _payload(result) + assert payload["workflowId"] + assert payload["label"] == "Smoke-Test" + assert payload["workflowId"] in fakeInterface.workflows + assert fakeInterface.workflows[payload["workflowId"]]["active"] is False + + def test_missingLabelIsRejected(self, fakeInterface): + result = _runTool(workflowTools._createWorkflow, {}, _ctx()) + assert not result.success + assert "label" in (result.error or "").lower() + assert fakeInterface.calls == [], "no DB call must happen on validation error" + + def test_missingInstanceIdIsRejected(self, fakeInterface): + ctx = {"workflowId": "wf-1", "userId": "user-1", "mandateId": "mand-1"} + result = _runTool(workflowTools._createWorkflow, {"label": "Empty"}, ctx) + assert not result.success + assert "instanceid" in (result.error or "").lower() + + def test_blankLabelIsRejected(self, fakeInterface): + result = _runTool(workflowTools._createWorkflow, {"label": " "}, _ctx()) + assert not result.success + + def test_initialGraphAndTagsAreForwarded(self, fakeInterface): + graph = {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []} + result = _runTool( + workflowTools._createWorkflow, + {"label": "With Graph", "tags": ["pwg"], "graph": graph, "description": "d"}, + _ctx(), + ) + assert result.success + wfId = _payload(result)["workflowId"] + stored = fakeInterface.workflows[wfId] + assert stored["tags"] == ["pwg"] + assert stored["description"] == "d" + assert stored["graph"]["nodes"][0]["id"] == "n1" + + +# --------------------------------------------------------------------------- +# deleteWorkflow — AC 6 +# --------------------------------------------------------------------------- + +class TestDeleteWorkflow: + def test_withoutConfirmReturnsError(self, fakeInterface): + fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"} + result = _runTool(workflowTools._deleteWorkflow, {"workflowId": "wf-x"}, _ctx()) + assert not result.success + assert "confirm" in (result.error or "").lower() + # Critical: no destructive call must reach the interface + assert all(call[0] != "deleteWorkflow" for call in fakeInterface.calls) + assert "wf-x" in fakeInterface.workflows + + def test_withConfirmFalseAlsoBlocks(self, fakeInterface): + fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"} + result = _runTool( + workflowTools._deleteWorkflow, + {"workflowId": "wf-x", "confirm": False}, + _ctx(), + ) + assert not result.success + assert "wf-x" in fakeInterface.workflows + + def test_withConfirmTrueDeletes(self, fakeInterface): + fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"} + result = _runTool( + workflowTools._deleteWorkflow, + {"workflowId": "wf-x", "confirm": True}, + _ctx(), + ) + assert result.success, result.error + assert "wf-x" not in fakeInterface.workflows + + def test_unknownWorkflowReturnsError(self, fakeInterface): + result = _runTool( + workflowTools._deleteWorkflow, + {"workflowId": "wf-ghost", "confirm": True}, + _ctx(), + ) + assert not result.success + assert "not found" in (result.error or "").lower() + + def test_missingIdsReturnError(self, fakeInterface): + result = _runTool( + workflowTools._deleteWorkflow, + {"confirm": True}, + {"userId": "user-1", "mandateId": "mand-1"}, + ) + assert not result.success + assert "required" in (result.error or "").lower() + + +# --------------------------------------------------------------------------- +# updateWorkflowMetadata — supports the "rename" intent without touching graph +# --------------------------------------------------------------------------- + +class TestUpdateWorkflowMetadata: + def test_renameOnlyTouchesLabel(self, fakeInterface): + fakeInterface.workflows["wf-1"] = { + "id": "wf-1", + "label": "Old Name", + "graph": {"nodes": [{"id": "n1"}], "connections": []}, + } + result = _runTool( + workflowTools._updateWorkflowMetadata, + {"workflowId": "wf-1", "label": "New Name"}, + _ctx(), + ) + assert result.success, result.error + payload = _payload(result) + assert payload["label"] == "New Name" + assert payload["changed"] == ["label"] + # Graph must remain untouched + stored = fakeInterface.workflows["wf-1"] + assert stored["graph"]["nodes"][0]["id"] == "n1" + + def test_emptyPatchIsRejected(self, fakeInterface): + fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"} + result = _runTool( + workflowTools._updateWorkflowMetadata, + {"workflowId": "wf-1"}, + _ctx(), + ) + assert not result.success + + def test_blankLabelIsRejected(self, fakeInterface): + fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"} + result = _runTool( + workflowTools._updateWorkflowMetadata, + {"workflowId": "wf-1", "label": " "}, + _ctx(), + ) + assert not result.success + + +# --------------------------------------------------------------------------- +# createWorkflowFromFile / exportWorkflowToFile — round-trip via the tool layer +# --------------------------------------------------------------------------- + +class TestImportExportTools: + def test_inlineEnvelopeImportCreatesWorkflow(self, fakeInterface): + envelope = { + "$schemaVersion": "1.0", + "label": "Imported PWG", + "graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []}, + } + result = _runTool( + workflowTools._createWorkflowFromFile, + {"envelope": envelope}, + _ctx(), + ) + assert result.success, result.error + payload = _payload(result) + assert payload["workflowId"] + assert payload["created"] is True + assert payload["label"] == "Imported PWG" + assert fakeInterface.workflows[payload["workflowId"]]["active"] is False + + def test_importRequiresFileIdOrEnvelope(self, fakeInterface): + result = _runTool( + workflowTools._createWorkflowFromFile, + {}, + _ctx(), + ) + assert not result.success + assert "fileid" in (result.error or "").lower() or "envelope" in (result.error or "").lower() + + def test_existingWorkflowIdReplacesGraph(self, fakeInterface): + fakeInterface.workflows["wf-1"] = { + "id": "wf-1", + "label": "Existing", + "graph": {"nodes": [], "connections": []}, + } + envelope = { + "$schemaVersion": "1.0", + "label": "Replaced", + "graph": {"nodes": [{"id": "n2", "type": "trigger.manual"}], "connections": []}, + } + result = _runTool( + workflowTools._createWorkflowFromFile, + {"envelope": envelope, "existingWorkflowId": "wf-1"}, + _ctx(), + ) + assert result.success, result.error + payload = _payload(result) + assert payload["created"] is False + assert fakeInterface.workflows["wf-1"]["graph"]["nodes"][0]["id"] == "n2" + + def test_exportProducesEnvelopeWithSchemaVersion(self, fakeInterface): + fakeInterface.workflows["wf-1"] = { + "id": "wf-1", + "label": "Round-Trip", + "graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []}, + } + result = _runTool( + workflowTools._exportWorkflowToFile, + {"workflowId": "wf-1"}, + _ctx(), + ) + assert result.success, result.error + payload = _payload(result) + assert payload["fileName"].endswith(".workflow.json") + assert payload["schemaVersion"] == "1.0" + envelope = payload["envelope"] + assert envelope["label"] == "Round-Trip" + assert envelope["$kind"] == "poweron.workflow" + + def test_exportUnknownWorkflowReturnsError(self, fakeInterface): + result = _runTool( + workflowTools._exportWorkflowToFile, + {"workflowId": "wf-ghost"}, + _ctx(), + ) + assert not result.success + assert "not found" in (result.error or "").lower() + + +# --------------------------------------------------------------------------- +# Tool definitions — make sure the new tools are registered with the toolbox +# (cheap regression test that a refactor doesn't drop one of them silently) +# --------------------------------------------------------------------------- + +class TestToolDefinitions: + def test_allCrudToolsAreRegistered(self): + defs = workflowTools.getWorkflowToolDefinitions() + names = {d["name"] for d in defs} + for required in ( + "createWorkflow", + "createWorkflowFromFile", + "exportWorkflowToFile", + "deleteWorkflow", + "updateWorkflowMetadata", + ): + assert required in names, f"{required} missing from workflow toolbox" + + def test_deleteWorkflowMarksConfirmRequired(self): + defs = {d["name"]: d for d in workflowTools.getWorkflowToolDefinitions()} + deleteSpec = defs["deleteWorkflow"] + params = deleteSpec.get("parameters", {}) + assert "confirm" in (params.get("required") or []), ( + "deleteWorkflow must declare confirm as required so the model " + "cannot accidentally call it without an explicit confirmation." + ) diff --git a/tests/unit/services/test_json_extraction_merging.py b/tests/unit/services/test_json_extraction_merging.py index 11f18bba..49f430a8 100644 --- a/tests/unit/services/test_json_extraction_merging.py +++ b/tests/unit/services/test_json_extraction_merging.py @@ -3,6 +3,14 @@ # All rights reserved. """ Test script for JSON extraction response detection and merging. + +The methods under test (``_isJsonExtractionResponse``, +``_mergeJsonExtractionResponses``, etc.) are pure data-manipulation and +do NOT touch ``self._context`` / ``self._get_service`` / the DB. We +therefore bypass ``ExtractionService.__init__`` (which would require a +live ``ServiceCenterContext`` + service-resolver) by instantiating with +``__new__`` — same as constructing a stub without dependency wiring. + Run: python gateway/tests/unit/services/test_json_extraction_merging.py """ @@ -20,7 +28,7 @@ from modules.serviceCenter.services.serviceExtraction.mainServiceExtraction impo def test_detects_json_with_code_fences(): """Test that JSON extraction responses with markdown code fences are detected""" print("Test 1: Detecting JSON with code fences...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) content_part = ContentPart( id="test1", @@ -38,7 +46,7 @@ def test_detects_json_with_code_fences(): def test_detects_json_without_code_fences(): """Test that JSON extraction responses without code fences are detected""" print("Test 2: Detecting JSON without code fences...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) content_part = ContentPart( id="test2", @@ -56,7 +64,7 @@ def test_detects_json_without_code_fences(): def test_rejects_non_extraction_json(): """Test that regular JSON (without extracted_content) is rejected""" print("Test 3: Rejecting non-extraction JSON...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) content_part = ContentPart( id="test3", @@ -74,7 +82,7 @@ def test_rejects_non_extraction_json(): def test_rejects_non_json_content(): """Test that non-JSON content is rejected""" print("Test 4: Rejecting non-JSON content...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) content_part = ContentPart( id="test4", @@ -92,7 +100,7 @@ def test_rejects_non_json_content(): def test_merges_tables_with_same_headers(): """Test that tables with identical headers are merged""" print("Test 5: Merging tables with same headers...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) part1 = ContentPart( id="test1", @@ -116,18 +124,22 @@ def test_merges_tables_with_same_headers(): assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}" table = merged["extracted_content"]["tables"][0] assert table["headers"] == ["Name", "Amount"], f"Headers should match, got {table['headers']}" - # Should have 3 unique rows (Alice appears twice but should be deduplicated) - assert len(table["rows"]) == 3, f"Should have 3 unique rows, got {len(table['rows'])}" + # Per the documented merge contract ("Tables: Combines all table rows, + # ... duplicates preserved" — see _mergeJsonExtractionResponses + # docstring), identical rows from different parts are NOT deduplicated. + # Alice appears in both parts, so the merged table has 4 rows. + assert len(table["rows"]) == 4, f"Should have 4 rows (duplicates preserved), got {len(table['rows'])}" assert ["Alice", "100"] in table["rows"], "Alice row should be present" assert ["Bob", "200"] in table["rows"], "Bob row should be present" assert ["Charlie", "300"] in table["rows"], "Charlie row should be present" + assert table["rows"].count(["Alice", "100"]) == 2, "Alice row must be preserved twice (no dedup)" print(" [PASS]") def test_merges_multiple_json_blocks_separated_by_dash(): """Test that multiple JSON blocks separated by --- are merged""" print("Test 6: Merging multiple JSON blocks separated by ---...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) # Create content part with multiple JSON blocks separated by --- part1 = ContentPart( @@ -153,7 +165,7 @@ def test_merges_multiple_json_blocks_separated_by_dash(): def test_merges_text_content(): """Test that text content from multiple parts is merged""" print("Test 7: Merging text content...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) part1 = ContentPart( id="test1", @@ -183,7 +195,7 @@ def test_merges_text_content(): def test_merges_headings_and_lists(): """Test that headings and lists are merged""" print("Test 8: Merging headings and lists...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) part1 = ContentPart( id="test1", @@ -218,7 +230,7 @@ def test_merges_headings_and_lists(): def test_handles_empty_content_parts(): """Test that empty content parts are handled gracefully""" print("Test 9: Handling empty content parts...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) part1 = ContentPart( id="test1", @@ -246,7 +258,7 @@ def test_handles_empty_content_parts(): def test_merges_tables_with_different_headers(): """Test that tables with different headers are kept separate""" print("Test 10: Keeping tables with different headers separate...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) part1 = ContentPart( id="test1", @@ -284,7 +296,7 @@ def test_merges_tables_with_different_headers(): def test_real_world_scenario(): """Test with a realistic scenario similar to the debug file""" print("Test 11: Real-world scenario (multiple documents, multiple JSON blocks)...") - service = ExtractionService(None) + service = ExtractionService.__new__(ExtractionService) # Simulate 3 documents, each with a table extraction response part1 = ContentPart( @@ -314,25 +326,23 @@ def test_real_world_scenario(): merged = service._mergeJsonExtractionResponses([part1, part2, part3]) - # Should have one merged table with all unique transactions + # Should have one merged table with all transactions assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}" table = merged["extracted_content"]["tables"][0] assert table["headers"] == ["Transaction ID", "Date", "Amount"], "Headers should match" - - # Should have 5 unique rows (TXN001 appears twice but should be deduplicated) - assert len(table["rows"]) == 5, f"Should have 5 unique rows, got {len(table['rows'])}" - - # Verify all transactions are present + + # Per the documented merge contract, duplicate rows are preserved. + # TXN001 occurs in both doc1 and doc2 -> 6 rows total. + assert len(table["rows"]) == 6, f"Should have 6 rows (duplicates preserved), got {len(table['rows'])}" + transaction_ids = [row[0] for row in table["rows"]] - assert "TXN001" in transaction_ids, "TXN001 should be present" - assert "TXN002" in transaction_ids, "TXN002 should be present" - assert "TXN003" in transaction_ids, "TXN003 should be present" - assert "TXN004" in transaction_ids, "TXN004 should be present" - assert "TXN005" in transaction_ids, "TXN005 should be present" - - # Verify TXN001 appears only once (deduplicated) - assert transaction_ids.count("TXN001") == 1, "TXN001 should appear only once (deduplicated)" - + for txn in ("TXN001", "TXN002", "TXN003", "TXN004", "TXN005"): + assert txn in transaction_ids, f"{txn} should be present" + + # TXN001 must appear twice (no dedup at merge time — dedup is the + # responsibility of downstream consumers if needed). + assert transaction_ids.count("TXN001") == 2, "TXN001 must appear twice (duplicates preserved)" + print(" [PASS]") diff --git a/tests/unit/workflows/test_automation2_graphUtils.py b/tests/unit/workflows/test_automation2_graphUtils.py index 45f4ba0f..78077987 100644 --- a/tests/unit/workflows/test_automation2_graphUtils.py +++ b/tests/unit/workflows/test_automation2_graphUtils.py @@ -34,9 +34,14 @@ class TestResolveParameterReferences: assert resolveParameterReferences(value, node_outputs) == "b" def test_ref_missing_node(self): + # Current runtime semantics: an unresolved ref (nodeId not in + # node_outputs) collapses to None rather than the original + # placeholder dict. The workflow engine relies on this — downstream + # nodes treat missing refs as "no value yet" rather than "literal + # placeholder" — so we lock the contract here. node_outputs = {} value = {"type": "ref", "nodeId": "missing", "path": ["x"]} - assert resolveParameterReferences(value, node_outputs) == value + assert resolveParameterReferences(value, node_outputs) is None def test_value_wrapper(self): value = {"type": "value", "value": "static text"} From 794ba36f27a8c76ddd3265bb47f9c22b21d723e2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 25 Apr 2026 01:13:01 +0200 Subject: [PATCH 2/7] teamsbot --- ...g-mietzinsbestaetigung-pilot.workflow.json | 3 +- env_dev.env | 2 +- .../datamodels/datamodelWorkflowActions.py | 27 +- .../graphicalEditor/adapterValidator.py | 205 ++ .../features/graphicalEditor/nodeAdapter.py | 172 ++ .../graphicalEditor/nodeDefinitions/ai.py | 35 +- .../nodeDefinitions/clickup.py | 8 +- .../nodeDefinitions/context.py | 15 +- .../graphicalEditor/nodeDefinitions/data.py | 18 +- .../graphicalEditor/nodeDefinitions/email.py | 50 +- .../graphicalEditor/nodeDefinitions/flow.py | 4 +- .../graphicalEditor/nodeDefinitions/input.py | 2 +- .../nodeDefinitions/sharepoint.py | 6 +- .../nodeDefinitions/triggers.py | 2 +- .../nodeDefinitions/trustee.py | 27 +- .../features/graphicalEditor/nodeRegistry.py | 57 +- modules/features/graphicalEditor/portTypes.py | 724 +++-- .../routeFeatureGraphicalEditor.py | 46 + .../graphicalEditor/upstreamPathsService.py | 128 + .../features/teamsbot/datamodelTeamsbot.py | 61 +- .../teamsbot/interfaceFeatureTeamsbot.py | 64 +- .../features/teamsbot/routeFeatureTeamsbot.py | 147 +- modules/features/teamsbot/service.py | 2576 +++++++++++++++-- .../trustee/accounting/accountingDataSync.py | 31 +- .../connectors/accountingConnectorRma.py | 9 +- .../trustee/datamodelFeatureTrustee.py | 4 +- .../features/trustee/routeFeatureTrustee.py | 76 +- modules/interfaces/interfaceBootstrap.py | 26 +- .../serviceAgent/actionToolAdapter.py | 129 +- .../services/serviceAgent/workflowTools.py | 118 + .../services/serviceAi/mainServiceAi.py | 27 +- .../workflows/automation2/executionEngine.py | 15 +- .../executors/actionNodeExecutor.py | 130 +- .../automation2/executors/dataExecutor.py | 41 +- .../featureInstanceRefMigration.py | 159 + modules/workflows/automation2/graphUtils.py | 137 +- .../automation2/pickNotPushMigration.py | 83 + .../automation2/udmUpstreamShapes.py | 36 + .../methods/_actionSignatureValidator.py | 177 ++ .../workflows/methods/methodAi/methodAi.py | 27 +- modules/workflows/methods/methodBase.py | 21 +- .../methods/methodChatbot/methodChatbot.py | 4 +- .../methods/methodClickup/methodClickup.py | 21 +- .../methods/methodContext/methodContext.py | 10 +- .../methods/methodFile/methodFile.py | 3 +- .../methods/methodJira/methodJira.py | 27 +- .../methods/methodOutlook/methodOutlook.py | 19 +- .../methods/methodRedmine/methodRedmine.py | 44 +- .../methodSharepoint/methodSharepoint.py | 35 +- .../methodTrustee/actions/processDocuments.py | 13 +- .../methodTrustee/actions/syncToAccounting.py | 6 +- .../methods/methodTrustee/methodTrustee.py | 47 +- scripts/_listMandates.py | 25 + scripts/check_orphan_featureinstance.py | 97 + .../script_migrate_feature_instance_refs.py | 213 ++ tests/integration/automation2/__init__.py | 2 + .../test_pick_not_push_migration_v2.py | 189 ++ tests/integration/trustee/__init__.py | 4 + .../trustee/test_spesenbelege_workflow_e2e.py | 474 +++ .../test_action_node_connection_provenance.py | 9 + .../graphicalEditor/test_adapter_validator.py | 352 +++ .../unit/graphicalEditor/test_node_adapter.py | 170 ++ .../graphicalEditor/test_portTypes_catalog.py | 257 ++ .../test_port_schema_recursive.py | 24 + .../test_upstream_paths_and_graph_schema.py | 67 + tests/unit/methods/__init__.py | 0 .../test_action_signature_validator.py | 289 ++ .../test_trustee_schema_compliance.py | 188 ++ tests/unit/scripts/__init__.py | 2 + .../test_migrate_feature_instance_refs.py | 289 ++ .../test_action_tool_adapter_typed.py | 127 + tests/unit/teamsbot/__init__.py | 0 tests/unit/teamsbot/test_directorPrompts.py | 604 ++++ .../unit/workflow/test_phase3_context_node.py | 33 +- .../workflows/test_automation2_graphUtils.py | 99 + .../test_featureInstanceRefMigration.py | 310 ++ 76 files changed, 8899 insertions(+), 779 deletions(-) create mode 100644 modules/features/graphicalEditor/adapterValidator.py create mode 100644 modules/features/graphicalEditor/nodeAdapter.py create mode 100644 modules/features/graphicalEditor/upstreamPathsService.py create mode 100644 modules/workflows/automation2/featureInstanceRefMigration.py create mode 100644 modules/workflows/automation2/pickNotPushMigration.py create mode 100644 modules/workflows/automation2/udmUpstreamShapes.py create mode 100644 modules/workflows/methods/_actionSignatureValidator.py create mode 100644 scripts/_listMandates.py create mode 100644 scripts/check_orphan_featureinstance.py create mode 100644 scripts/script_migrate_feature_instance_refs.py create mode 100644 tests/integration/automation2/__init__.py create mode 100644 tests/integration/automation2/test_pick_not_push_migration_v2.py create mode 100644 tests/integration/trustee/__init__.py create mode 100644 tests/integration/trustee/test_spesenbelege_workflow_e2e.py create mode 100644 tests/unit/graphicalEditor/test_action_node_connection_provenance.py create mode 100644 tests/unit/graphicalEditor/test_adapter_validator.py create mode 100644 tests/unit/graphicalEditor/test_node_adapter.py create mode 100644 tests/unit/graphicalEditor/test_portTypes_catalog.py create mode 100644 tests/unit/graphicalEditor/test_port_schema_recursive.py create mode 100644 tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py create mode 100644 tests/unit/methods/__init__.py create mode 100644 tests/unit/methods/test_action_signature_validator.py create mode 100644 tests/unit/nodeDefinitions/test_trustee_schema_compliance.py create mode 100644 tests/unit/scripts/__init__.py create mode 100644 tests/unit/scripts/test_migrate_feature_instance_refs.py create mode 100644 tests/unit/serviceAgent/test_action_tool_adapter_typed.py create mode 100644 tests/unit/teamsbot/__init__.py create mode 100644 tests/unit/teamsbot/test_directorPrompts.py create mode 100644 tests/unit/workflows/test_featureInstanceRefMigration.py diff --git a/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json b/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json index 8a5a7f60..78f50751 100644 --- a/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json +++ b/demoData/workflows/pwg-mietzinsbestaetigung-pilot.workflow.json @@ -37,7 +37,8 @@ "y": 200, "title": "Pro Scan-Dokument", "parameters": { - "level": 1, + "items": {"type": "ref", "nodeId": "n2", "path": ["files"]}, + "level": "auto", "concurrency": 1 } }, diff --git a/env_dev.env b/env_dev.env index 4f1c7367..60bc5511 100644 --- a/env_dev.env +++ b/env_dev.env @@ -77,7 +77,7 @@ Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJK # Teamsbot Browser Bot Service # For local testing: run the bot locally with `npm run dev` in service-teams-browser-bot # The bot will connect back to localhost:8000 via WebSocket -TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io +TEAMSBOT_BROWSER_BOT_URL = http://localhost:4100 # Debug Configuration APP_DEBUG_CHAT_WORKFLOW_ENABLED = True diff --git a/modules/datamodels/datamodelWorkflowActions.py b/modules/datamodels/datamodelWorkflowActions.py index 09c07c14..e82941f6 100644 --- a/modules/datamodels/datamodelWorkflowActions.py +++ b/modules/datamodels/datamodelWorkflowActions.py @@ -22,9 +22,24 @@ class WorkflowActionParameter(BaseModel): json_schema_extra={"label": "Name"}, ) type: str = Field( - description="Python type as string: 'str', 'int', 'bool', 'List[str]', etc.", + description=( + "Type reference. Either a primitive ('str', 'int', 'bool', 'float', 'Any', " + "'List[str]', 'Dict[str,Any]', …) or a PORT_TYPE_CATALOG schema name " + "(e.g. 'ConnectionRef', 'FeatureInstanceRef', 'DocumentList', " + "'TrusteeProcessResult'). Catalog types are validated by " + "_actionSignatureValidator at startup." + ), json_schema_extra={"label": "Typ"}, ) + uiHint: Optional[str] = Field( + None, + description=( + "Optional UI rendering hint for adapters. " + "Free-form (e.g. 'textarea', 'cron', 'fieldBuilder'). " + "Adapters can override; defaults derive from frontendType when absent." + ), + json_schema_extra={"label": "UI-Hinweis"}, + ) frontendType: FrontendType = Field( description="UI rendering type (from global FrontendType enum)", json_schema_extra={"label": "Frontend-Typ"}, @@ -80,6 +95,16 @@ class WorkflowActionDefinition(BaseModel): description="Parameter schema definitions", json_schema_extra={"label": "Parameter"}, ) + outputType: str = Field( + "ActionResult", + description=( + "PORT_TYPE_CATALOG schema name produced by this action " + "(e.g. 'TrusteeProcessResult', 'EmailDraft', 'DocumentList'). " + "Defaults to 'ActionResult' for fire-and-forget actions. " + "Validated by _actionSignatureValidator at startup." + ), + json_schema_extra={"label": "Ausgabe-Typ"}, + ) execute: Optional[Callable] = Field( None, description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically.", diff --git a/modules/features/graphicalEditor/adapterValidator.py b/modules/features/graphicalEditor/adapterValidator.py new file mode 100644 index 00000000..7f760896 --- /dev/null +++ b/modules/features/graphicalEditor/adapterValidator.py @@ -0,0 +1,205 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Adapter Validator — enforces 5 drift rules between Schicht-3 NodeAdapters +and the Schicht-2 Actions they bind to. + +This is the CI-safety net described in the typed-action-architecture plan: +any drift between an Editor-Node Adapter and the underlying Action signature +must be caught at build time, never silently in production. + +Rules +----- +1. Every `userParams[].actionArg` exists as a parameter in the bound Action. +2. Every required Action parameter is covered by either `userParams` or + `contextParams` (i.e. no required arg is silently unset). +3. Every Action parameter type exists in PORT_TYPE_CATALOG (or is a primitive). +4. The Action `outputType` exists in PORT_TYPE_CATALOG (or is a primitive). +5. Every method-bound STATIC node has an Adapter (no orphan node ids). + +Rules 3+4 are already enforced by `_actionSignatureValidator` in Phase 2 — +this module composes with it so the report covers both layers. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Mapping + +from modules.features.graphicalEditor.nodeAdapter import ( + NodeAdapter, + _adapterFromLegacyNode, + _isMethodBoundNode, +) +from modules.workflows.methods._actionSignatureValidator import _validateTypeRef + + +@dataclass +class AdapterValidationReport: + """Aggregated drift report across all adapters.""" + + errors: List[str] = field(default_factory=list) + warnings: List[str] = field(default_factory=list) + + @property + def isHealthy(self) -> bool: + return not self.errors + + def merge(self, other: "AdapterValidationReport") -> None: + self.errors.extend(other.errors) + self.warnings.extend(other.warnings) + + +def _validateAdapterAgainstAction( + adapter: NodeAdapter, + actionDef: Any, +) -> AdapterValidationReport: + """Apply rules 1-4 to a single Adapter / Action pair. + + `actionDef` is duck-typed so tests can pass dataclasses; production passes + a `WorkflowActionDefinition` Pydantic model. + """ + report = AdapterValidationReport() + actionParams: Mapping[str, Any] = getattr(actionDef, "parameters", {}) or {} + outputType: str = getattr(actionDef, "outputType", "ActionResult") or "ActionResult" + + # Rule 1: every userParam.actionArg exists in the Action + declaredArgs = {up.actionArg for up in adapter.userParams} + for arg in declaredArgs: + if arg not in actionParams: + report.errors.append( + f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': " + f"userParams.actionArg '{arg}' does not exist in action parameters " + f"(known: {sorted(actionParams.keys())})" + ) + + # Rule 2: every required Action arg is covered (userParams OR contextParams) + coveredArgs = declaredArgs | set(adapter.contextParams.keys()) + for paramName, paramDef in actionParams.items(): + isRequired = bool(getattr(paramDef, "required", False)) + if isRequired and paramName not in coveredArgs: + report.errors.append( + f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': " + f"required action arg '{paramName}' is neither in userParams nor contextParams" + ) + + # Rule 3: every Action parameter type exists in catalog (re-runs Phase-2 rule) + for paramName, paramDef in actionParams.items(): + typeRef = getattr(paramDef, "type", None) + if not typeRef: + report.errors.append( + f"action '{adapter.bindsAction}.{paramName}': missing 'type' on parameter" + ) + continue + for err in _validateTypeRef(typeRef): + report.errors.append( + f"action '{adapter.bindsAction}.{paramName}': {err}" + ) + + # Rule 4: Action outputType exists in catalog (or is a generic fire-and-forget type) + if outputType not in {"ActionResult", "Transit"}: + for err in _validateTypeRef(outputType): + report.errors.append( + f"action '{adapter.bindsAction}'.outputType: {err}" + ) + + return report + + +def _validateAllAdapters( + staticNodes: List[Mapping[str, Any]], + actionsRegistry: Mapping[str, Mapping[str, Any]], +) -> AdapterValidationReport: + """Run rules 1-5 across all method-bound static node definitions. + + Args: + staticNodes: list of legacy node-dicts (`STATIC_NODE_TYPES`). + actionsRegistry: mapping of method-shortname -> {actionName: WorkflowActionDefinition}. + Built from live `methods` registry or test-stubbed methods. + + Returns: + Aggregated drift report. `isHealthy` is True only if every method-bound + node has a matching Action and all 5 rules pass. + """ + report = AdapterValidationReport() + seenAdapterIds: set[str] = set() + + for node in staticNodes: + if not _isMethodBoundNode(node): + continue + + adapter = _adapterFromLegacyNode(node) + if adapter is None: + report.errors.append( + f"node '{node.get('id')}' is method-bound but adapter projection failed" + ) + continue + seenAdapterIds.add(adapter.nodeId) + + methodName = str(node.get("_method") or "") + actionName = str(node.get("_action") or "") + methodActions = actionsRegistry.get(methodName) or {} + actionDef = methodActions.get(actionName) + if actionDef is None: + report.errors.append( + f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': " + f"action not found in registry (method '{methodName}' has actions: " + f"{sorted(methodActions.keys())})" + ) + continue + + report.merge(_validateAdapterAgainstAction(adapter, actionDef)) + + # Rule 5: every Action with dynamicMode=False MUST have an Editor Adapter. + # dynamicMode=True actions are agent-only and may legitimately lack one. + boundActions: set[str] = set() + for node in staticNodes: + if not _isMethodBoundNode(node): + continue + boundActions.add(f"{node.get('_method')}.{node.get('_action')}") + + for methodName, actions in actionsRegistry.items(): + for actionName, actionDef in actions.items(): + if bool(getattr(actionDef, "dynamicMode", False)): + continue + fqn = f"{methodName}.{actionName}" + if fqn not in boundActions: + report.warnings.append( + f"action '{fqn}' has no Editor adapter " + f"(set dynamicMode=True if intended as agent-only)" + ) + + return report + + +def _formatAdapterReport(report: AdapterValidationReport) -> str: + """Format a report for human-readable logging.""" + lines: List[str] = [] + if report.isHealthy and not report.warnings: + lines.append("Adapter validator: all healthy.") + return "\n".join(lines) + + if report.errors: + lines.append(f"Adapter validator: {len(report.errors)} ERROR(s)") + for e in report.errors: + lines.append(f" ERROR: {e}") + if report.warnings: + lines.append(f"Adapter validator: {len(report.warnings)} WARNING(s)") + for w in report.warnings: + lines.append(f" WARN: {w}") + return "\n".join(lines) + + +def _buildActionsRegistryFromMethods( + methodInstances: Mapping[str, Any], +) -> Dict[str, Dict[str, Any]]: + """Convenience: turn `{shortName: methodInstance}` into the registry shape. + + `methodInstance._actions` is a dict of action-name -> WorkflowActionDefinition. + """ + registry: Dict[str, Dict[str, Any]] = {} + for shortName, instance in methodInstances.items(): + actions = getattr(instance, "_actions", None) + if isinstance(actions, dict): + registry[shortName] = dict(actions) + return registry diff --git a/modules/features/graphicalEditor/nodeAdapter.py b/modules/features/graphicalEditor/nodeAdapter.py new file mode 100644 index 00000000..ed7ec711 --- /dev/null +++ b/modules/features/graphicalEditor/nodeAdapter.py @@ -0,0 +1,172 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Schicht-3 Adapter Layer — projects Schicht-2 Actions into Editor-Node form. + +Architecture (see wiki/c-work/1-plan/2026-04-typed-action-architecture.md): + - Schicht 1: Types Catalog (portTypes.PORT_TYPE_CATALOG) + - Schicht 2: Methods/Actions (modules/workflows/methods/method*) - source of truth + for Backend capabilities (parameter types, output types). + - Schicht 3: Adapters (this module) - Editor-Node + AI-Agent-Tool wrappers around + Actions. References Action signature, never duplicates types. + - Schicht 4: Workflow-Bindings + Agent-Tool-Calls (instance-level wiring). + +This module defines the in-code Adapter representation (NodeAdapter, +UserParamMapping) and the projection helpers that convert between the +legacy node-dict wire format and the typed Adapter view. + +Wire-format compatibility: the legacy dicts in nodeDefinitions/*.py remain +the wire format consumed by the frontend until Phase 4. This module exposes +an Adapter VIEW over those dicts so the validator and AI-tool generator can +operate on a clean, typed structure without breaking consumers. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Mapping, Optional + + +@dataclass(frozen=True) +class UserParamMapping: + """Maps an Action argument into a Node's user-facing parameter. + + The Action signature is the source of truth for type/required/description. + This mapping carries Editor-specific overrides (label, UI hints, conditional + visibility) but never re-declares the type. + """ + + actionArg: str + label: Optional[Any] = None + description: Optional[Any] = None + uiHint: Optional[str] = None + frontendOptions: Optional[Any] = None + visibleWhen: Optional[Dict[str, Any]] = None + defaultValue: Any = None + + +@dataclass(frozen=True) +class NodeAdapter: + """Schicht-3 Editor-Node adapter — binds to a Schicht-2 Action. + + All type information for `userParams` is inherited from the bound Action. + The adapter only carries Editor-specific concerns (UI labels, port topology, + icon/color metadata). + """ + + nodeId: str + bindsAction: str + category: str + label: Any + description: Any + userParams: List[UserParamMapping] = field(default_factory=list) + contextParams: Dict[str, str] = field(default_factory=dict) + inputs: int = 1 + outputs: int = 1 + inputAccepts: List[List[str]] = field(default_factory=list) + outputLabels: Optional[List[Any]] = None + meta: Dict[str, Any] = field(default_factory=dict) + + +def _isMethodBoundNode(node: Mapping[str, Any]) -> bool: + """True if a legacy node dict is bound to a Schicht-2 Action.""" + return bool(node.get("_method") and node.get("_action")) + + +def _bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]: + """Build the canonical 'method.action' identifier from a legacy node dict. + + Returns None for framework-primitive nodes (trigger/flow/input/data). + """ + method = node.get("_method") + action = node.get("_action") + if not method or not action: + return None + return f"{method}.{action}" + + +def _userParamFromLegacyParam(legacyParam: Mapping[str, Any]) -> UserParamMapping: + """Project a legacy parameter dict into a UserParamMapping view. + + The view carries only Editor-overrides; type/required come from the Action. + """ + return UserParamMapping( + actionArg=str(legacyParam.get("name", "")), + label=legacyParam.get("label"), + description=legacyParam.get("description"), + uiHint=legacyParam.get("frontendType"), + frontendOptions=legacyParam.get("frontendOptions"), + visibleWhen=_extractVisibleWhen(legacyParam.get("frontendOptions")), + defaultValue=legacyParam.get("default"), + ) + + +def _extractVisibleWhen(frontendOptions: Any) -> Optional[Dict[str, Any]]: + """Extract conditional-visibility hint from legacy frontendOptions.showWhen.""" + if not isinstance(frontendOptions, dict): + return None + dependsOn = frontendOptions.get("dependsOn") + showWhen = frontendOptions.get("showWhen") + if not dependsOn or not showWhen: + return None + return {"actionArg": str(dependsOn), "in": list(showWhen) if isinstance(showWhen, (list, tuple)) else [showWhen]} + + +def _adapterFromLegacyNode(node: Mapping[str, Any]) -> Optional[NodeAdapter]: + """Build a NodeAdapter view from a legacy node dict. + + Returns None for framework-primitive nodes (no _method/_action binding). + Pure projection — no validation, no Action-signature lookup. + """ + if not _isMethodBoundNode(node): + return None + + bindsAction = _bindsActionFromLegacy(node) + if not bindsAction: + return None + + inputAccepts = _projectInputAccepts(node) + + return NodeAdapter( + nodeId=str(node.get("id", "")), + bindsAction=bindsAction, + category=str(node.get("category", "")), + label=node.get("label", ""), + description=node.get("description", ""), + userParams=[_userParamFromLegacyParam(p) for p in (node.get("parameters") or [])], + contextParams={}, + inputs=int(node.get("inputs", 1)), + outputs=int(node.get("outputs", 1)), + inputAccepts=inputAccepts, + outputLabels=node.get("outputLabels"), + meta=dict(node.get("meta") or {}), + ) + + +def _projectInputAccepts(node: Mapping[str, Any]) -> List[List[str]]: + """Convert legacy `inputPorts` dict-of-dicts into a per-port `accepts` list.""" + inputPorts = node.get("inputPorts") or {} + if not isinstance(inputPorts, dict): + return [] + inputs = int(node.get("inputs", 0) or 0) + if inputs <= 0: + return [] + out: List[List[str]] = [] + for portIdx in range(inputs): + portCfg = inputPorts.get(portIdx) or inputPorts.get(str(portIdx)) or {} + accepts = portCfg.get("accepts") if isinstance(portCfg, dict) else None + out.append(list(accepts) if isinstance(accepts, (list, tuple)) else []) + return out + + +def _projectAllAdapters(staticNodes: List[Mapping[str, Any]]) -> Dict[str, NodeAdapter]: + """Project a list of legacy node dicts into a {nodeId: NodeAdapter} map. + + Framework-primitive nodes (no Action binding) are silently skipped. + """ + out: Dict[str, NodeAdapter] = {} + for node in staticNodes: + adapter = _adapterFromLegacyNode(node) + if adapter is not None: + out[adapter.nodeId] = adapter + return out diff --git a/modules/features/graphicalEditor/nodeDefinitions/ai.py b/modules/features/graphicalEditor/nodeDefinitions/ai.py index dce86056..d0e0eb22 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/ai.py +++ b/modules/features/graphicalEditor/nodeDefinitions/ai.py @@ -12,19 +12,19 @@ AI_NODES = [ "parameters": [ {"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea", "description": t("KI-Prompt")}, - {"name": "outputFormat", "type": "string", "required": False, "frontendType": "select", - "frontendOptions": {"options": ["text", "json", "emailDraft"]}, - "description": t("Ausgabeformat"), "default": "text"}, + {"name": "resultType", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["txt", "json", "md", "csv", "xml", "html", "pdf", "docx", "xlsx", "pptx", "png", "jpg"]}, + "description": t("Ausgabeformat"), "default": "txt"}, {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden", "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""}, - {"name": "context", "type": "string", "required": False, "frontendType": "hidden", - "description": t("Kontext-Daten (via Wire oder DataRef)"), "default": ""}, {"name": "simpleMode", "type": "boolean", "required": False, "frontendType": "checkbox", "description": t("Einfacher Modus"), "default": True}, ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["DocumentList", "AiResult", "TextResult", "Transit"]}}, + "inputPorts": {0: {"accepts": [ + "DocumentList", "AiResult", "TextResult", "Transit", "LoopItem", "ActionResult", + ]}}, "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-robot", "color": "#9C27B0", "usesAi": True}, "_method": "ai", @@ -53,9 +53,11 @@ AI_NODES = [ "label": t("Dokument zusammenfassen"), "description": t("Dokumentinhalt zusammenfassen"), "parameters": [ + {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""}, {"name": "summaryLength", "type": "string", "required": False, "frontendType": "select", - "frontendOptions": {"options": ["short", "medium", "long"]}, - "description": t("Kurz, mittel oder lang"), "default": "medium"}, + "frontendOptions": {"options": ["brief", "medium", "detailed"]}, + "description": t("Kurz, mittel oder ausführlich"), "default": "medium"}, ], "inputs": 1, "outputs": 1, @@ -71,9 +73,10 @@ AI_NODES = [ "label": t("Dokument übersetzen"), "description": t("Dokument in Zielsprache übersetzen"), "parameters": [ - {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select", - "frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]}, - "description": t("Zielsprache")}, + {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""}, + {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "text", + "description": t("Zielsprache (z.B. de, en, French)")}, ], "inputs": 1, "outputs": 1, @@ -89,8 +92,10 @@ AI_NODES = [ "label": t("Dokument konvertieren"), "description": t("Dokument in anderes Format konvertieren"), "parameters": [ + {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""}, {"name": "targetFormat", "type": "string", "required": True, "frontendType": "select", - "frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]}, + "frontendOptions": {"options": ["docx", "pdf", "xlsx", "csv", "txt", "html", "json", "md"]}, "description": t("Zielformat")}, ], "inputs": 1, @@ -126,9 +131,9 @@ AI_NODES = [ "parameters": [ {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea", "description": t("Code-Generierungs-Prompt")}, - {"name": "language", "type": "string", "required": False, "frontendType": "select", - "frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]}, - "description": t("Programmiersprache"), "default": "python"}, + {"name": "resultType", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["py", "js", "ts", "html", "java", "cpp", "txt", "json", "csv", "xml"]}, + "description": t("Datei-Endung der erzeugten Code-Datei"), "default": "py"}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/graphicalEditor/nodeDefinitions/clickup.py b/modules/features/graphicalEditor/nodeDefinitions/clickup.py index 210fe7f7..56b27984 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/clickup.py +++ b/modules/features/graphicalEditor/nodeDefinitions/clickup.py @@ -94,8 +94,6 @@ CLICKUP_NODES = [ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", "frontendOptions": {"authority": "clickup"}, "description": t("ClickUp-Verbindung")}, - {"name": "teamId", "type": "string", "required": False, "frontendType": "text", - "description": t("Workspace")}, {"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList", "frontendOptions": {"dependsOn": "connectionReference"}, "description": t("Pfad zur Liste")}, @@ -144,10 +142,8 @@ CLICKUP_NODES = [ "description": t("Task-ID")}, {"name": "path", "type": "string", "required": False, "frontendType": "text", "description": t("Oder Pfad")}, - {"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows", - "description": t("Zu ändernde Felder")}, {"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json", - "description": t("JSON für API")}, + "description": t("JSON-Body für PUT /task/{id}, z.B. {\"name\":\"...\",\"status\":\"...\"}")}, ], "inputs": 1, "outputs": 1, @@ -172,6 +168,8 @@ CLICKUP_NODES = [ "description": t("Oder Pfad")}, {"name": "fileName", "type": "string", "required": False, "frontendType": "text", "description": t("Dateiname")}, + {"name": "content", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/graphicalEditor/nodeDefinitions/context.py b/modules/features/graphicalEditor/nodeDefinitions/context.py index b677dca6..81d878be 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/context.py +++ b/modules/features/graphicalEditor/nodeDefinitions/context.py @@ -10,14 +10,13 @@ CONTEXT_NODES = [ "label": t("Inhalt extrahieren"), "description": t("Dokumentstruktur extrahieren ohne KI (Seiten, Abschnitte, Bilder, Tabellen)"), "parameters": [ - {"name": "outputDetail", "type": "string", "required": False, "frontendType": "select", - "frontendOptions": {"options": ["full", "structure", "references"]}, - "description": t("Detailgrad: full = alles, structure = Skelett, references = Dateireferenzen"), - "default": "full"}, - {"name": "includeImages", "type": "boolean", "required": False, "frontendType": "checkbox", - "description": t("Bilder extrahieren"), "default": True}, - {"name": "includeTables", "type": "boolean", "required": False, "frontendType": "checkbox", - "description": t("Tabellen extrahieren"), "default": True}, + {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""}, + {"name": "extractionOptions", "type": "object", "required": False, "frontendType": "json", + "description": t( + "Extraktions-Optionen (JSON), z.B. {\"includeImages\": true, \"includeTables\": true, " + "\"outputDetail\": \"full\"}"), + "default": {}}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/graphicalEditor/nodeDefinitions/data.py b/modules/features/graphicalEditor/nodeDefinitions/data.py index 73552928..b6208840 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/data.py +++ b/modules/features/graphicalEditor/nodeDefinitions/data.py @@ -16,27 +16,11 @@ DATA_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["Transit"]}}, + "inputPorts": {0: {"accepts": ["Transit", "AiResult", "LoopItem"]}}, "outputPorts": {0: {"schema": "AggregateResult"}}, "executor": "data", "meta": {"icon": "mdi-playlist-plus", "color": "#607D8B", "usesAi": False}, }, - { - "id": "data.transform", - "category": "data", - "label": t("Umwandeln"), - "description": t("Daten umstrukturieren"), - "parameters": [ - {"name": "mappings", "type": "json", "required": True, "frontendType": "mappingTable", - "description": t("Feld-Zuordnungen"), "default": []}, - ], - "inputs": 1, - "outputs": 1, - "inputPorts": {0: {"accepts": ["Transit"]}}, - "outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}}, - "executor": "data", - "meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B", "usesAi": False}, - }, { "id": "data.filter", "category": "data", diff --git a/modules/features/graphicalEditor/nodeDefinitions/email.py b/modules/features/graphicalEditor/nodeDefinitions/email.py index 30872815..11ff9895 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/email.py +++ b/modules/features/graphicalEditor/nodeDefinitions/email.py @@ -17,14 +17,8 @@ EMAIL_NODES = [ "description": t("Ordner"), "default": "Inbox"}, {"name": "limit", "type": "number", "required": False, "frontendType": "number", "description": t("Max E-Mails"), "default": 100}, - {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text", - "description": t("Nur von dieser Adresse"), "default": ""}, - {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text", - "description": t("Betreff muss enthalten"), "default": ""}, - {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox", - "description": t("Nur mit Anhängen"), "default": False}, {"name": "filter", "type": "string", "required": False, "frontendType": "text", - "description": t("Erweitert: Filter-Text"), "default": ""}, + "description": t("Filter-Ausdruck (z.B. 'from:max@example.com hasAttachment:true betreff')"), "default": ""}, ], "inputs": 1, "outputs": 1, @@ -43,24 +37,12 @@ EMAIL_NODES = [ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", "frontendOptions": {"authority": "msft"}, "description": t("E-Mail-Konto Verbindung")}, - {"name": "query", "type": "string", "required": False, "frontendType": "text", - "description": t("Suchbegriff"), "default": ""}, + {"name": "query", "type": "string", "required": True, "frontendType": "text", + "description": t("Suchausdruck (z.B. 'from:max@example.com hasAttachments:true Rechnung')")}, {"name": "folder", "type": "string", "required": False, "frontendType": "text", - "description": t("Ordner"), "default": "Inbox"}, + "description": t("Ordner"), "default": "All"}, {"name": "limit", "type": "number", "required": False, "frontendType": "number", "description": t("Max E-Mails"), "default": 100}, - {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text", - "description": t("Von Adresse"), "default": ""}, - {"name": "toAddress", "type": "string", "required": False, "frontendType": "text", - "description": t("An Adresse"), "default": ""}, - {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text", - "description": t("Betreff enthält"), "default": ""}, - {"name": "bodyContains", "type": "string", "required": False, "frontendType": "text", - "description": t("Inhalt enthält"), "default": ""}, - {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox", - "description": t("Mit Anhängen"), "default": False}, - {"name": "filter", "type": "string", "required": False, "frontendType": "text", - "description": t("Erweitert: KQL-Filter"), "default": ""}, ], "inputs": 1, "outputs": 1, @@ -74,22 +56,24 @@ EMAIL_NODES = [ "id": "email.draftEmail", "category": "email", "label": t("E-Mail entwerfen"), - "description": t("E-Mail-Entwurf erstellen"), + "description": t( + "AI-gestützt einen E-Mail-Entwurf aus Kontext und optionalen Dokumenten erstellen"), "parameters": [ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", "frontendOptions": {"authority": "msft"}, "description": t("E-Mail-Konto")}, - {"name": "subject", "type": "string", "required": True, "frontendType": "text", - "description": t("Betreff")}, - {"name": "body", "type": "string", "required": True, "frontendType": "textarea", - "description": t("Inhalt")}, + {"name": "context", "type": "string", "required": False, "frontendType": "textarea", + "description": t("Kontext / Brief-Beschreibung für die KI-Komposition"), "default": ""}, {"name": "to", "type": "string", "required": False, "frontendType": "text", - "description": t("Empfänger"), "default": ""}, - {"name": "attachments", "type": "json", "required": False, "frontendType": "attachmentBuilder", - "description": t( - "Anhänge: Liste von { contentRef | csvFromVariable | base64Content, name, mimeType }. " - "Per Wire befüllbar (z.B. CSV aus data.consolidate)."), - "default": []}, + "description": t("Empfänger (komma-separiert, optional für Entwurf)"), "default": ""}, + {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden", + "description": t("Anhang-Dokumente (via Wire oder DataRef)"), "default": ""}, + {"name": "emailContent", "type": "string", "required": False, "frontendType": "hidden", + "description": t("Direkt vorbereiteter Inhalt {subject, body, to} (via Wire — überspringt KI)"), + "default": ""}, + {"name": "emailStyle", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["formal", "casual", "business"]}, + "description": t("Stil"), "default": "business"}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/graphicalEditor/nodeDefinitions/flow.py b/modules/features/graphicalEditor/nodeDefinitions/flow.py index be5f5a43..04a44197 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/flow.py +++ b/modules/features/graphicalEditor/nodeDefinitions/flow.py @@ -88,7 +88,9 @@ FLOW_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["Transit", "UdmDocument"]}}, + "inputPorts": {0: {"accepts": [ + "Transit", "UdmDocument", "EmailList", "DocumentList", "FileList", "TaskList", "ActionResult", + ]}}, "outputPorts": {0: {"schema": "LoopItem"}}, "executor": "flow", "meta": {"icon": "mdi-repeat", "color": "#FF9800", "usesAi": False}, diff --git a/modules/features/graphicalEditor/nodeDefinitions/input.py b/modules/features/graphicalEditor/nodeDefinitions/input.py index e6d88c6b..647e9ac2 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/input.py +++ b/modules/features/graphicalEditor/nodeDefinitions/input.py @@ -22,7 +22,7 @@ INPUT_NODES = [ "inputs": 1, "outputs": 1, "inputPorts": {0: {"accepts": ["Transit"]}}, - "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}}, + "outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "fields"}}}, "executor": "input", "meta": {"icon": "mdi-form-textbox", "color": "#9C27B0", "usesAi": False}, }, diff --git a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py index 1faa6bbb..7e52ef8d 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py +++ b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py @@ -43,7 +43,7 @@ SHAREPOINT_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["FileList", "Transit"]}}, + "inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}}, "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-document", "color": "#0078D4", "usesAi": False}, "_method": "sharepoint", @@ -61,6 +61,8 @@ SHAREPOINT_NODES = [ {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder", "frontendOptions": {"dependsOn": "connectionReference"}, "description": t("Zielordner-Pfad")}, + {"name": "content", "type": "string", "required": True, "frontendType": "hidden", + "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""}, ], "inputs": 1, "outputs": 1, @@ -106,7 +108,7 @@ SHAREPOINT_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["FileList", "Transit"]}}, + "inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}}, "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-download", "color": "#0078D4", "usesAi": False}, "_method": "sharepoint", diff --git a/modules/features/graphicalEditor/nodeDefinitions/triggers.py b/modules/features/graphicalEditor/nodeDefinitions/triggers.py index d4122527..7b55d5d7 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/triggers.py +++ b/modules/features/graphicalEditor/nodeDefinitions/triggers.py @@ -34,7 +34,7 @@ TRIGGER_NODES = [ "inputs": 0, "outputs": 1, "inputPorts": {}, - "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}}, + "outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "formFields"}}}, "executor": "trigger", "meta": {"icon": "mdi-form-select", "color": "#9C27B0", "usesAi": False}, }, diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py index 0eb5e119..5f7de2b2 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/trustee.py +++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py @@ -46,8 +46,11 @@ TRUSTEE_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, - "outputPorts": {0: {"schema": "DocumentList"}}, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit", "AiResult", "LoopItem", "ActionResult"]}}, + # Runtime returns ActionResult.isSuccess(documents=[...]) — see + # actions/extractFromFiles.py. Declaring DocumentList here was adapter + # drift and broke the DataPicker for downstream nodes. + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50", "usesAi": True}, "_method": "trustee", "_action": "extractFromFiles", @@ -58,14 +61,17 @@ TRUSTEE_NODES = [ "label": t("Dokumente verarbeiten"), "description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."), "parameters": [ - {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden", - "description": t("Automatisch via Wire-Verbindung befüllt")}, + # Type matches what producers actually emit: ActionResult.documents + # is `List[ActionDocument]` (see datamodelChat.ActionResult). The + # DataPicker uses this string to filter compatible upstream paths. + {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", + "description": t("Dokumentenliste eines Upstream-Producers (z.B. trustee.extractFromFiles → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")}, {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", "description": t("Trustee Feature-Instanz-ID")}, ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}}, "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-file-document-check", "color": "#4CAF50", "usesAi": False}, "_method": "trustee", @@ -77,14 +83,17 @@ TRUSTEE_NODES = [ "label": t("In Buchhaltung synchronisieren"), "description": t("Trustee-Positionen in Buchhaltungssystem übertragen."), "parameters": [ - {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden", - "description": t("Automatisch via Wire-Verbindung befüllt")}, + # Type matches what producers actually emit: ActionResult.documents + # is `List[ActionDocument]` (see datamodelChat.ActionResult). The + # DataPicker uses this string to filter compatible upstream paths. + {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", + "description": t("Verarbeitete Dokumentenliste eines Upstream-Producers (z.B. trustee.processDocuments → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")}, {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", "description": t("Trustee Feature-Instanz-ID")}, ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["Transit"]}}, + "inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}}, "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-calculator", "color": "#4CAF50", "usesAi": False}, "_method": "trustee", @@ -122,7 +131,7 @@ TRUSTEE_NODES = [ ], "inputs": 1, "outputs": 1, - "inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult"]}}, + "inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult", "UdmDocument"]}}, "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-database-search", "color": "#4CAF50", "usesAi": False}, "_method": "trustee", diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py index 577b530f..dd302282 100644 --- a/modules/features/graphicalEditor/nodeRegistry.py +++ b/modules/features/graphicalEditor/nodeRegistry.py @@ -6,9 +6,10 @@ Nodes are defined first; IO/method actions are used at execution time. """ import logging -from typing import Dict, List, Any +from typing import Dict, List, Any, Optional from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES +from modules.features.graphicalEditor.nodeAdapter import _bindsActionFromLegacy from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText @@ -41,12 +42,21 @@ def _pickFromLangMap(d: Any, lang: str) -> Any: def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: - """Apply request language via resolveText (t() keys + multilingual dicts).""" + """Apply request language via resolveText (t() keys + multilingual dicts). + + Also exposes Schicht-3 metadata (`bindsAction`) derived from the legacy + `_method`/`_action` pair, so frontend consumers can resolve back to the + Schicht-2 Action signature without parsing internal underscore-prefixed + fields. + """ lang = normalizePrimaryLanguageTag(language, "en") + bindsAction = _bindsActionFromLegacy(node) out = dict(node) for key in list(out.keys()): if key.startswith("_"): del out[key] + if bindsAction: + out["bindsAction"] = bindsAction lbl = node.get("label") if lbl is not None: out["label"] = resolveText(lbl, lang) or node.get("id", "") @@ -124,3 +134,46 @@ def getNodeTypeToMethodAction() -> Dict[str, tuple]: if method and action: mapping[node["id"]] = (method, action) return mapping + + +def validateAdaptersAgainstMethods(methodInstances: Optional[Dict[str, Any]] = None) -> Optional[str]: + """Run the Schicht-3 Adapter validator (5 drift rules) against the live methods. + + Intended to be called once at startup after methodDiscovery has populated + the methods registry. Returns a human-readable report (None when healthy) + so the caller decides whether to log, raise, or surface to operators. + + Pass `methodInstances` directly for testability; defaults to importing + the live registry from `methodDiscovery.methods`. + """ + from modules.features.graphicalEditor.adapterValidator import ( + _buildActionsRegistryFromMethods, + _formatAdapterReport, + _validateAllAdapters, + ) + + if methodInstances is None: + try: + from modules.workflows.processing.shared.methodDiscovery import methods + except Exception as exc: + logger.warning("Adapter validator skipped: cannot import methodDiscovery (%s)", exc) + return None + + methodInstances = {} + for fullName, info in (methods or {}).items(): + shortName = fullName.replace("Method", "").lower() if fullName[:1].isupper() else fullName + instance = info.get("instance") if isinstance(info, dict) else None + if instance is not None: + methodInstances[shortName] = instance + + if not methodInstances: + return None + + actionsRegistry = _buildActionsRegistryFromMethods(methodInstances) + report = _validateAllAdapters(list(STATIC_NODE_TYPES), actionsRegistry) + formatted = _formatAdapterReport(report) + if not report.isHealthy: + logger.warning("[adapterValidator] %s", formatted) + elif report.warnings: + logger.info("[adapterValidator] %s", formatted) + return formatted diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py index 1ac90665..b607316a 100644 --- a/modules/features/graphicalEditor/portTypes.py +++ b/modules/features/graphicalEditor/portTypes.py @@ -4,13 +4,14 @@ Typed Port System for the Graphical Editor. Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES, -output normalizers, input extractors, and Transit helpers. +output normalizers, and Transit helpers. + """ import logging import time import uuid -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field @@ -25,9 +26,14 @@ logger = logging.getLogger(__name__) class PortField(BaseModel): name: str - type: str # str, int, bool, List[str], List[Document], Dict[str,Any] + type: str # str, int, bool, List[str], List[Document], Dict[str,Any], ConnectionRef, … description: str = "" required: bool = True + enumValues: Optional[List[str]] = None + # Marks this field as the discriminator for a Ref-Schema (e.g. ConnectionRef.authority, + # FeatureInstanceRef.featureCode). Pickers/validators use it to filter compatible + # producers by sub-type. Type must be "str" when discriminator is True. + discriminator: bool = False class PortSchema(BaseModel): @@ -57,13 +63,113 @@ class OutputPortDef(BaseModel): # --------------------------------------------------------------------------- PORT_TYPE_CATALOG: Dict[str, PortSchema] = { + # ----------------------------------------------------------------- + # Refs (handles to external resources, pickable by user) + # ----------------------------------------------------------------- + "ConnectionRef": PortSchema(name="ConnectionRef", fields=[ + PortField(name="id", type="str", description="UserConnection.id (UUID)"), + PortField(name="authority", type="str", discriminator=True, + description="Auth-Provider-Code: msft | clickup | google | …"), + PortField(name="label", type="str", required=False, description="Anzeigename"), + ]), + "FeatureInstanceRef": PortSchema(name="FeatureInstanceRef", fields=[ + PortField(name="id", type="str", description="FeatureInstance.id (UUID)"), + PortField(name="featureCode", type="str", discriminator=True, + description="Feature-Modul-Code: trustee | redmine | clickup | sharepoint | …"), + PortField(name="label", type="str", required=False, description="Anzeigename"), + PortField(name="mandateId", type="str", required=False, description="Zugehöriger Mandant"), + ]), + "ClickUpListRef": PortSchema(name="ClickUpListRef", fields=[ + PortField(name="listId", type="str", description="ClickUp-Listen-ID"), + PortField(name="name", type="str", required=False, description="Listenname"), + PortField(name="spaceId", type="str", required=False, description="Space-ID"), + PortField(name="folderId", type="str", required=False, description="Ordner-ID"), + PortField(name="connection", type="ConnectionRef", required=False, + description="ClickUp-Verbindung"), + ]), + "PromptTemplateRef": PortSchema(name="PromptTemplateRef", fields=[ + PortField(name="id", type="str", description="Prompt-Template-ID"), + PortField(name="name", type="str", required=False, description="Anzeigename"), + PortField(name="version", type="str", required=False, description="Version / Tag"), + ]), + "SharePointFolderRef": PortSchema(name="SharePointFolderRef", fields=[ + PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"), + PortField(name="driveId", type="str", required=False, description="Drive ID"), + PortField(name="folderPath", type="str", required=False, description="Ordnerpfad"), + PortField(name="label", type="str", required=False, description="Kurzlabel für Picker"), + ]), + "SharePointFileRef": PortSchema(name="SharePointFileRef", fields=[ + PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"), + PortField(name="driveId", type="str", required=False, description="Drive ID"), + PortField(name="filePath", type="str", required=False, description="Dateipfad"), + PortField(name="fileName", type="str", required=False, description="Dateiname"), + PortField(name="label", type="str", required=False, description="Kurzlabel"), + ]), + "Document": PortSchema(name="Document", fields=[ + PortField(name="id", type="str", required=False, description="Dokument-/Datei-ID"), + PortField(name="name", type="str", required=False, description="Anzeigename"), + PortField(name="mimeType", type="str", required=False, description="MIME-Typ"), + PortField(name="sizeBytes", type="int", required=False, description="Grösse"), + PortField(name="downloadUrl", type="str", required=False, description="Download-URL"), + PortField(name="filePath", type="str", required=False, description="Logischer Pfad"), + ]), + "FileItem": PortSchema(name="FileItem", fields=[ + PortField(name="id", type="str", required=False, description="Datei-ID"), + PortField(name="name", type="str", required=False, description="Name"), + PortField(name="path", type="str", required=False, description="Pfad"), + PortField(name="mimeType", type="str", required=False, description="MIME"), + PortField(name="sizeBytes", type="int", required=False, description="Grösse"), + ]), + "EmailItem": PortSchema(name="EmailItem", fields=[ + PortField(name="id", type="str", required=False, description="Message-ID"), + PortField(name="subject", type="str", required=False, description="Betreff"), + PortField(name="fromAddress", type="str", required=False, description="Absender"), + PortField(name="toAddresses", type="List[str]", required=False, description="Empfänger"), + PortField(name="receivedAt", type="str", required=False, description="Empfangen am"), + PortField(name="hasAttachments", type="bool", required=False, description="Hat Anhänge"), + PortField(name="bodyPreview", type="str", required=False, description="Vorschau"), + ]), + "TaskItem": PortSchema(name="TaskItem", fields=[ + PortField(name="id", type="str", required=False, description="Task-ID"), + PortField(name="title", type="str", required=False, description="Titel"), + PortField(name="status", type="str", required=False, description="Status"), + PortField(name="assignee", type="str", required=False, description="Assignee"), + PortField(name="dueDate", type="str", required=False, description="Fälligkeit"), + PortField(name="listId", type="str", required=False, description="ClickUp-Liste"), + ]), + "QueryResult": PortSchema(name="QueryResult", fields=[ + PortField(name="rows", type="List[Any]", description="Ergebniszeilen"), + PortField(name="columns", type="List[str]", required=False, description="Spaltennamen"), + PortField(name="count", type="int", required=False, description="Zeilenanzahl"), + ]), + "UdmPage": PortSchema(name="UdmPage", fields=[ + PortField(name="pageNumber", type="int", required=False, description="Seitennummer"), + PortField(name="blocks", type="List[Any]", required=False, description="ContentBlocks"), + ]), + "UdmBlock": PortSchema(name="UdmBlock", fields=[ + PortField(name="kind", type="str", required=False, description="Block-Typ"), + PortField(name="text", type="str", required=False, description="Textinhalt"), + PortField(name="children", type="List[Any]", required=False, description="Unterblöcke"), + ]), "DocumentList": PortSchema(name="DocumentList", fields=[ PortField(name="documents", type="List[Document]", description="Dokumentenliste"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Verbindung, mit der die Liste erzeugt wurde"), + PortField(name="source", type="SharePointFolderRef", required=False, + description="Herkunftsordner / Quelle"), + PortField(name="count", type="int", required=False, + description="Anzahl Dokumente"), ]), "FileList": PortSchema(name="FileList", fields=[ - PortField(name="files", type="List[File]", + PortField(name="files", type="List[FileItem]", description="Dateiliste"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Verbindung"), + PortField(name="source", type="SharePointFolderRef", required=False, + description="Listen-Kontext"), + PortField(name="count", type="int", required=False, + description="Anzahl Dateien"), ]), "EmailDraft": PortSchema(name="EmailDraft", fields=[ PortField(name="subject", type="str", @@ -76,14 +182,26 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = { description="CC"), PortField(name="attachments", type="List[Document]", required=False, description="Anhänge"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Outlook-/Graph-Verbindung"), ]), "EmailList": PortSchema(name="EmailList", fields=[ - PortField(name="emails", type="List[Email]", + PortField(name="emails", type="List[EmailItem]", description="E-Mails"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Verbindung"), + PortField(name="count", type="int", required=False, + description="Anzahl"), ]), "TaskList": PortSchema(name="TaskList", fields=[ - PortField(name="tasks", type="List[Task]", + PortField(name="tasks", type="List[TaskItem]", description="Aufgaben"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Verbindung"), + PortField(name="listId", type="str", required=False, + description="ClickUp-Listen-ID"), + PortField(name="count", type="int", required=False, + description="Anzahl"), ]), "TaskResult": PortSchema(name="TaskResult", fields=[ PortField(name="success", type="bool", @@ -143,11 +261,29 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = { PortField(name="merged", type="Dict", description="Zusammengeführte Daten"), ]), + "ActionDocument": PortSchema(name="ActionDocument", fields=[ + PortField(name="documentName", type="str", + description="Dokumentname"), + PortField(name="documentData", type="Any", + description="Inhalt / Rohdaten (z.B. JSON-String, Bytes)"), + PortField(name="mimeType", type="str", + description="MIME-Typ"), + PortField(name="fileId", type="str", required=False, + description="Persistierte FileItem.id (vom Engine ergänzt)"), + PortField(name="fileName", type="str", required=False, + description="Persistierter Dateiname (vom Engine ergänzt)"), + ]), "ActionResult": PortSchema(name="ActionResult", fields=[ PortField(name="success", type="bool", description="Erfolg"), PortField(name="error", type="str", required=False, description="Fehler"), + # `documents` is populated for every action that returns ActionResult + # (see datamodelChat.ActionResult.documents and actionNodeExecutor.out). + # Without it in the catalog the DataPicker cannot offer downstream + # bindings like `processDocuments → documents → *` for syncToAccounting. + PortField(name="documents", type="List[ActionDocument]", required=False, + description="Erzeugte Dokumente (immer befüllt für Trustee/AI/Email/...)"), PortField(name="data", type="Dict", required=False, description="Ergebnisdaten"), ]), @@ -156,7 +292,11 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = { PortField(name="id", type="str", description="Dokument-ID"), PortField(name="sourceType", type="str", description="Quellformat (pdf, docx, …)"), PortField(name="sourcePath", type="str", description="Quellpfad"), - PortField(name="children", type="List[Any]", description="StructuralNodes"), + PortField(name="children", type="List[Any]", description="StructuralNodes / Seiten"), + PortField(name="connection", type="ConnectionRef", required=False, + description="Optionale Verbindungsreferenz"), + PortField(name="source", type="SharePointFileRef", required=False, + description="Optionale Datei-Herkunft"), ]), "UdmNodeList": PortSchema(name="UdmNodeList", fields=[ PortField(name="nodes", type="List[Any]", description="UDM StructuralNodes oder ContentBlocks"), @@ -167,9 +307,287 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = { PortField(name="mode", type="str", description="Konsolidierungsmodus"), PortField(name="count", type="int", description="Anzahl verarbeiteter Elemente"), ]), + + # ----------------------------------------------------------------- + # Shared sub-types (used inside Result schemas) + # ----------------------------------------------------------------- + "ProcessError": PortSchema(name="ProcessError", fields=[ + PortField(name="documentId", type="str", required=False, + description="Betroffenes Dokument (falls zuordbar)"), + PortField(name="stage", type="str", + description="Pipeline-Stufe: extract | parse | sync | validate | …"), + PortField(name="message", type="str", description="Fehlermeldung"), + PortField(name="code", type="str", required=False, description="Fehler-Code"), + ]), + "JournalLine": PortSchema(name="JournalLine", fields=[ + PortField(name="id", type="str", required=False, description="Buchungszeilen-ID"), + PortField(name="bookingDate", type="str", description="Buchungsdatum (ISO)"), + PortField(name="account", type="str", description="Konto"), + PortField(name="contraAccount", type="str", required=False, description="Gegenkonto"), + PortField(name="amount", type="float", description="Betrag"), + PortField(name="currency", type="str", required=False, description="Währung"), + PortField(name="text", type="str", required=False, description="Buchungstext"), + PortField(name="reference", type="str", required=False, description="Beleg-Referenz"), + ]), + + # ----------------------------------------------------------------- + # Trustee Action Results + # ----------------------------------------------------------------- + "TrusteeRefreshResult": PortSchema(name="TrusteeRefreshResult", fields=[ + PortField(name="syncCounts", type="Dict[str,int]", + description="Tabellen → Anzahl synchronisierter Datensätze"), + PortField(name="oldestBookingDate", type="str", required=False, + description="Ältestes Buchungsdatum (ISO)"), + PortField(name="newestBookingDate", type="str", required=False, + description="Neuestes Buchungsdatum (ISO)"), + PortField(name="durationMs", type="int", required=False, + description="Dauer in Millisekunden"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Trustee-Instanz"), + PortField(name="errors", type="List[ProcessError]", required=False, + description="Fehler-Liste"), + ]), + "TrusteeProcessResult": PortSchema(name="TrusteeProcessResult", fields=[ + PortField(name="documents", type="List[Document]", + description="Verarbeitete Dokumente mit angereicherten Daten"), + PortField(name="processedCount", type="int", required=False, + description="Anzahl erfolgreich verarbeiteter Dokumente"), + PortField(name="failedCount", type="int", required=False, + description="Anzahl fehlgeschlagener Dokumente"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Trustee-Instanz"), + PortField(name="errors", type="List[ProcessError]", required=False, + description="Fehler-Liste"), + ]), + "TrusteeSyncResult": PortSchema(name="TrusteeSyncResult", fields=[ + PortField(name="syncedCount", type="int", + description="Erfolgreich in das Buchhaltungssystem übertragene Datensätze"), + PortField(name="failedCount", type="int", required=False, + description="Fehlgeschlagene Übertragungen"), + PortField(name="journalLines", type="List[JournalLine]", required=False, + description="Erzeugte Buchungszeilen"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Ziel-Trustee-Instanz"), + PortField(name="errors", type="List[ProcessError]", required=False, + description="Fehler-Liste"), + ]), + + # ----------------------------------------------------------------- + # Redmine Action Results + # ----------------------------------------------------------------- + "RedmineTicket": PortSchema(name="RedmineTicket", fields=[ + PortField(name="id", type="str", description="Ticket-ID"), + PortField(name="subject", type="str", description="Betreff"), + PortField(name="description", type="str", required=False, description="Beschreibung"), + PortField(name="status", type="str", description="Status-Name"), + PortField(name="tracker", type="str", required=False, + description="Tracker (Bug, Feature, Task, …)"), + PortField(name="priority", type="str", required=False, description="Priorität"), + PortField(name="assignee", type="str", required=False, description="Zugewiesen an"), + PortField(name="author", type="str", required=False, description="Autor"), + PortField(name="project", type="str", required=False, description="Projekt"), + PortField(name="createdOn", type="str", required=False, description="Erstellt (ISO)"), + PortField(name="updatedOn", type="str", required=False, description="Aktualisiert (ISO)"), + PortField(name="dueDate", type="str", required=False, description="Fälligkeitsdatum"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Redmine-Instanz"), + ]), + "RedmineTicketList": PortSchema(name="RedmineTicketList", fields=[ + PortField(name="tickets", type="List[RedmineTicket]", description="Ticket-Liste"), + PortField(name="count", type="int", required=False, description="Anzahl Tickets"), + PortField(name="filters", type="Dict[str,Any]", required=False, + description="Angewendete Filter"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Redmine-Instanz"), + ]), + "RedmineStats": PortSchema(name="RedmineStats", fields=[ + PortField(name="kpis", type="Dict[str,Any]", + description="Key Performance Indicators"), + PortField(name="throughput", type="Dict[str,Any]", required=False, + description="Durchsatz pro Zeitraum"), + PortField(name="statusDistribution", type="Dict[str,int]", required=False, + description="Tickets pro Status"), + PortField(name="backlog", type="Dict[str,Any]", required=False, + description="Backlog-Statistik"), + PortField(name="featureInstance", type="FeatureInstanceRef", required=False, + description="Redmine-Instanz"), + ]), + + # ----------------------------------------------------------------- + # ClickUp / SharePoint / Email helper results + # ----------------------------------------------------------------- + "TaskAttachmentRef": PortSchema(name="TaskAttachmentRef", fields=[ + PortField(name="taskId", type="str", description="Aufgaben-ID"), + PortField(name="attachmentId", type="str", required=False, description="Attachment-ID"), + PortField(name="fileName", type="str", required=False, description="Dateiname"), + PortField(name="url", type="str", required=False, description="Download-URL"), + ]), + "AttachmentSpec": PortSchema(name="AttachmentSpec", fields=[ + PortField(name="source", type="str", + description="Quellart: path | document | url", + enumValues=["path", "document", "url"]), + PortField(name="ref", type="str", + description="Referenzwert (Pfad / Document.id / URL)"), + PortField(name="fileName", type="str", required=False, + description="Override-Dateiname"), + PortField(name="mimeType", type="str", required=False, description="MIME-Override"), + ]), + + # ----------------------------------------------------------------- + # Expressions (replace string-typed condition / cron params) + # ----------------------------------------------------------------- + "CronExpression": PortSchema(name="CronExpression", fields=[ + PortField(name="expression", type="str", + description="Cron-Ausdruck (5 oder 6 Felder)"), + PortField(name="timezone", type="str", required=False, + description="IANA Timezone (z.B. Europe/Zurich)"), + ]), + "ConditionExpression": PortSchema(name="ConditionExpression", fields=[ + PortField(name="expression", type="str", description="Boolescher Ausdruck"), + PortField(name="syntax", type="str", required=False, + description="jmespath | jsonlogic | python | template", + enumValues=["jmespath", "jsonlogic", "python", "template"]), + ]), + + # ----------------------------------------------------------------- + # Semantic primitives (give meaning to scalar str values) + # ----------------------------------------------------------------- + "DateTime": PortSchema(name="DateTime", fields=[ + PortField(name="iso", type="str", description="ISO-8601 Datum/Zeit"), + PortField(name="timezone", type="str", required=False, + description="IANA Timezone"), + ]), + "Url": PortSchema(name="Url", fields=[ + PortField(name="url", type="str", description="Vollständige URL"), + PortField(name="label", type="str", required=False, description="Anzeigename"), + ]), } +# --------------------------------------------------------------------------- +# Catalog validator +# --------------------------------------------------------------------------- + +# Primitives accepted as PortField.type in addition to catalog schema names. +PRIMITIVE_TYPES: frozenset = frozenset({ + "str", "int", "bool", "float", "Any", "Dict", "List", +}) + + +def _stripContainer(typeStr: str) -> List[str]: + """ + Extract referenced type names from a PortField.type string. + + Examples: + "str" -> ["str"] + "List[Document]" -> ["Document"] + "Dict[str,Any]" -> ["str", "Any"] + "ConnectionRef" -> ["ConnectionRef"] + "List[ProcessError]" -> ["ProcessError"] + """ + s = (typeStr or "").strip() + if not s: + return [] + if "[" in s and s.endswith("]"): + # outer container ignored, inner parts split by comma + inner = s[s.index("[") + 1 : -1] + parts = [p.strip() for p in inner.split(",") if p.strip()] + return parts or [s] + return [s] + + +def _isKnownType(typeName: str) -> bool: + return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG + + +def _validateCatalog() -> List[str]: + """ + Validate PORT_TYPE_CATALOG integrity. + + Returns a list of error messages. Empty list means catalog is healthy. + + Checks: + 1. Every PortField.type references either a primitive or a known schema. + 2. Discriminator fields exist, are typed "str", and at most one per schema. + 3. No cyclic references via required schema-typed fields + (optional fields may form cycles intentionally, e.g. provenance). + 4. Schema name in catalog key matches PortSchema.name. + """ + errors: List[str] = [] + + # Check 4: key consistency + for key, schema in PORT_TYPE_CATALOG.items(): + if schema.name != key: + errors.append(f"Catalog key '{key}' does not match schema.name '{schema.name}'") + + # Check 1 + 2: type refs and discriminators + for schemaName, schema in PORT_TYPE_CATALOG.items(): + discriminatorCount = 0 + for field in schema.fields: + for refName in _stripContainer(field.type): + if not _isKnownType(refName): + errors.append( + f"{schemaName}.{field.name}: unknown type '{refName}' " + f"(not a primitive and not in catalog)" + ) + if field.discriminator: + discriminatorCount += 1 + if field.type != "str": + errors.append( + f"{schemaName}.{field.name}: discriminator must be 'str', got '{field.type}'" + ) + if discriminatorCount > 1: + errors.append( + f"{schemaName}: has {discriminatorCount} discriminator fields, max 1 allowed" + ) + + # Check 3: cycles via required schema-typed fields + def _requiredSchemaRefs(name: str) -> List[str]: + sch = PORT_TYPE_CATALOG.get(name) + if not sch: + return [] + out: List[str] = [] + for field in sch.fields: + if not field.required: + continue + for ref in _stripContainer(field.type): + if ref in PORT_TYPE_CATALOG: + out.append(ref) + return out + + def _hasCycle(start: str) -> Optional[List[str]]: + stack: List[str] = [start] + path: List[str] = [] + visiting: set = set() + + def _dfs(name: str) -> Optional[List[str]]: + if name in visiting: + return path + [name] + visiting.add(name) + path.append(name) + for ref in _requiredSchemaRefs(name): + if ref == start and len(path) > 0: + return path + [ref] + cycle = _dfs(ref) + if cycle: + return cycle + path.pop() + visiting.discard(name) + return None + + return _dfs(start) + + for schemaName in PORT_TYPE_CATALOG.keys(): + cycle = _hasCycle(schemaName) + if cycle and cycle[0] == schemaName: + errors.append( + f"{schemaName}: cyclic required-ref chain: {' -> '.join(cycle)}" + ) + break # one cycle is enough — avoid spamming + + return errors + + # --------------------------------------------------------------------------- # SYSTEM_VARIABLES # --------------------------------------------------------------------------- @@ -259,6 +677,8 @@ def _defaultForType(typeStr: str) -> Any: return 0 if typeStr == "str": return "" + if typeStr in PORT_TYPE_CATALOG: + return {} return None @@ -272,210 +692,6 @@ def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]: return result -# --------------------------------------------------------------------------- -# Input extractors (one per input port type) -# --------------------------------------------------------------------------- - -def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract EmailDraft fields from upstream output.""" - result = {} - if upstream.get("responseData") and isinstance(upstream["responseData"], dict): - rd = upstream["responseData"] - for key in ("subject", "body", "to", "cc"): - if key in rd: - result[key] = rd[key] - if not result: - for key in ("subject", "body", "to", "cc"): - if key in upstream: - result[key] = upstream[key] - return result - - -def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract documents from upstream output.""" - docs = upstream.get("documents") or upstream.get("documentList") or [] - if not docs and isinstance(upstream.get("data"), dict): - docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or [] - # input.upload format - if not docs: - files = upstream.get("files") or [] - fileObj = upstream.get("file") - fileIds = upstream.get("fileIds") or [] - if fileObj: - docs = [fileObj] - elif files: - docs = files - elif fileIds: - docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds] - normalized = docs if isinstance(docs, list) else [docs] - return {"documents": normalized, "documentList": normalized} if docs else {} - - -def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract text from upstream output.""" - text = upstream.get("text") or upstream.get("response") or upstream.get("context") or "" - if not text and upstream.get("payload"): - import json - payload = upstream["payload"] - text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload) - return {"text": str(text)} if text else {} - - -def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract email list from upstream output.""" - emails = upstream.get("emails") or [] - if not emails: - docs = upstream.get("documents") or upstream.get("documentList") or [] - if docs: - import json - for doc in docs: - raw = doc.get("documentData") if isinstance(doc, dict) else None - if raw: - try: - data = json.loads(raw) if isinstance(raw, str) else raw - if isinstance(data, dict): - found = (data.get("emails", {}).get("emails", []) - or data.get("searchResults", {}).get("results", [])) - if found: - emails = found - break - except (json.JSONDecodeError, TypeError): - pass - return {"emails": emails} if emails else {} - - -def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract task list from upstream output.""" - tasks = upstream.get("tasks") or [] - if not tasks: - docs = upstream.get("documents") or upstream.get("documentList") or [] - if docs: - import json - for doc in docs: - raw = doc.get("documentData") if isinstance(doc, dict) else None - if raw: - try: - data = json.loads(raw) if isinstance(raw, str) else raw - if isinstance(data, dict) and "tasks" in data: - tasks = data["tasks"] - break - except (json.JSONDecodeError, TypeError): - pass - return {"tasks": tasks} if tasks else {} - - -def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract file list from upstream output.""" - files = upstream.get("files") or [] - return {"files": files} if files else {} - - -def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract form payload from upstream output.""" - payload = upstream.get("payload") - if payload and isinstance(payload, dict): - return {"payload": payload} - return {} - - -def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract AI result fields from upstream output.""" - result = {} - for key in ("prompt", "response", "responseData", "context", "documents"): - if key in upstream: - result[key] = upstream[key] - return result - - -def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract bool result from upstream output.""" - result = upstream.get("result") - if isinstance(result, bool): - return {"result": result, "reason": upstream.get("reason", "")} - approved = upstream.get("approved") - if isinstance(approved, bool): - return {"result": approved, "reason": upstream.get("reason", "")} - return {} - - -def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract task result from upstream output.""" - result = {} - if "taskId" in upstream: - result["taskId"] = upstream["taskId"] - if "task" in upstream: - result["task"] = upstream["task"] - elif "clickupTask" in upstream: - result["task"] = upstream["clickupTask"] - if "success" in upstream: - result["success"] = upstream["success"] - return result - - -def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract aggregate result from upstream output.""" - items = upstream.get("items") or [] - return {"items": items, "count": len(items)} - - -def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract merge result from upstream output.""" - return { - "inputs": upstream.get("inputs", {}), - "first": upstream.get("first"), - "merged": upstream.get("merged", {}), - } - - -def _extractUdmDocument(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract UdmDocument fields from upstream output.""" - if upstream.get("children") is not None and upstream.get("sourceType"): - return upstream - udm = upstream.get("udm") - if isinstance(udm, dict) and udm.get("children") is not None: - return udm - return {} - - -def _extractUdmNodeList(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract UdmNodeList fields from upstream output.""" - nodes = upstream.get("nodes") - if isinstance(nodes, list): - return {"nodes": nodes, "count": len(nodes)} - children = upstream.get("children") - if isinstance(children, list): - return {"nodes": children, "count": len(children)} - return {} - - -def _extractConsolidateResult(upstream: Dict[str, Any]) -> Dict[str, Any]: - """Extract ConsolidateResult fields from upstream output.""" - result = {} - for key in ("result", "mode", "count"): - if key in upstream: - result[key] = upstream[key] - return result - - -INPUT_EXTRACTORS: Dict[str, Callable] = { - "EmailDraft": _extractEmailDraft, - "DocumentList": _extractDocuments, - "TextResult": _extractText, - "EmailList": _extractEmailList, - "TaskList": _extractTaskList, - "FileList": _extractFileList, - "FormPayload": _extractFormPayload, - "AiResult": _extractAiResult, - "BoolResult": _extractBoolResult, - "TaskResult": _extractTaskResult, - "AggregateResult": _extractAggregateResult, - "MergeResult": _extractMergeResult, - "UdmDocument": _extractUdmDocument, - "UdmNodeList": _extractUdmNodeList, - "ConsolidateResult": _extractConsolidateResult, -} - - # --------------------------------------------------------------------------- # Transit helpers # --------------------------------------------------------------------------- @@ -522,27 +738,83 @@ def _resolveTransitChain( # Schema derivation for dynamic outputs # --------------------------------------------------------------------------- -def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]: - """Derive output schema from form field definitions.""" - fields_param = (node.get("parameters") or {}).get("fields") +def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]: + """Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …).""" + fields_param = (node.get("parameters") or {}).get(param_key) if not fields_param or not isinstance(fields_param, list): return None - portFields = [] + portFields: List[PortField] = [] + + def _append_field(fname: str, ftype: Any, lab: Any, required: bool) -> None: + _desc = resolveText(lab) if lab is not None else fname + if not str(_desc).strip(): + _desc = fname + portFields.append(PortField( + name=fname, + type=str(ftype) if ftype is not None else "str", + description=_desc, + required=required, + )) + for f in fields_param: - if isinstance(f, dict) and f.get("name"): - _lab = f.get("label") - _desc = resolveText(_lab) if _lab is not None else f["name"] - if not _desc.strip(): - _desc = f["name"] - portFields.append(PortField( - name=f["name"], - type=f.get("type", "str"), - description=_desc, - required=f.get("required", False), - )) + if not isinstance(f, dict) or not f.get("name"): + continue + fname = str(f["name"]) + if str(f.get("type", "")).lower() == "group" and isinstance(f.get("fields"), list): + for sub in f["fields"]: + if isinstance(sub, dict) and sub.get("name"): + _append_field( + f"{fname}.{sub['name']}", + sub.get("type", "str"), + sub.get("label"), + bool(sub.get("required", False)), + ) + continue + _append_field(fname, f.get("type", "str"), f.get("label"), bool(f.get("required", False))) return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None +def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]: + """Derive output schema from form field definitions (``parameters.fields``).""" + return _derive_form_payload_schema_from_param(node, "fields") + + +def parse_graph_defined_output_schema( + node: Dict[str, Any], + output_port: Dict[str, Any], +) -> Optional[PortSchema]: + """ + Resolve a node's output port to a concrete PortSchema. + + Supports: + - Static catalog name: ``schema: "ActionResult"`` + - Graph-defined: ``schema: {"kind": "fromGraph", "parameter": "fields"}`` + - Legacy: ``dynamic`` + ``deriveFrom`` on the port dict. + """ + if not isinstance(output_port, dict): + return None + schema_spec = output_port.get("schema") + if isinstance(schema_spec, dict) and schema_spec.get("kind") == "fromGraph": + param_key = str(schema_spec.get("parameter") or "fields") + return _derive_form_payload_schema_from_param(node, param_key) + if output_port.get("dynamic") and output_port.get("deriveFrom"): + return _derive_form_payload_schema_from_param(node, str(output_port.get("deriveFrom"))) + if isinstance(schema_spec, str) and schema_spec: + return PORT_TYPE_CATALOG.get(schema_spec) + return None + + +def resolve_output_schema_name(node: Dict[str, Any], output_port: Dict[str, Any]) -> str: + """Return a schema name for port compatibility / path listing.""" + derived = parse_graph_defined_output_schema(node, output_port) + if derived: + return derived.name + spec = output_port.get("schema") if isinstance(output_port, dict) else None + if isinstance(spec, str) and spec: + return spec + return "Any" + + def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]: """Derive output schema from transform mappings.""" mappings = (node.get("parameters") or {}).get("mappings") diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py index 11d9d3e9..4332df50 100644 --- a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py @@ -26,6 +26,7 @@ from modules.workflows.automation2.runEnvelope import ( normalize_run_envelope, ) from modules.features.graphicalEditor.entryPoints import find_invocation +from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths from modules.shared.i18nRegistry import apiRouteContext, resolveText routeApiMsg = apiRouteContext("routeFeatureGraphicalEditor") @@ -135,6 +136,48 @@ def get_node_types( return result +@router.post("/{instanceId}/upstream-paths") +@limiter.limit("60/minute") +def post_upstream_paths( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + body: Dict[str, Any] = Body(...), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Return pickable upstream DataRef paths for a node (draft graph in body).""" + _validateInstanceAccess(instanceId, context) + graph = body.get("graph") + node_id = body.get("nodeId") + if not isinstance(graph, dict) or not node_id: + raise HTTPException(status_code=400, detail=routeApiMsg("graph and nodeId are required")) + paths = compute_upstream_paths(graph, str(node_id)) + return {"paths": paths} + + +@router.get("/{instanceId}/upstream-paths/{node_id}") +@limiter.limit("60/minute") +def get_upstream_paths_saved( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + node_id: str = Path(..., description="Target node id"), + workflowId: str = Query(..., description="Workflow id whose saved graph is used"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Return upstream paths using the persisted workflow graph (same payload as POST variant).""" + mandate_id = _validateInstanceAccess(instanceId, context) + if not workflowId: + raise HTTPException(status_code=400, detail=routeApiMsg("workflowId is required")) + from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface + + iface = getGraphicalEditorInterface(context.user, mandate_id, featureInstanceId=instanceId) + wf = iface.getWorkflow(workflowId) + if not wf: + raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found")) + graph = wf.get("graph") or {} + paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id)) + return {"paths": paths} + + @router.get("/{instanceId}/options/user.connection") @limiter.limit("60/minute") def get_user_connection_options( @@ -813,6 +856,7 @@ async def _runEditorAgent( "\n\nAvailable tools (all valid — use whichever the user's intent calls for):" "\n Graph-mutating: readWorkflowGraph, listAvailableNodeTypes, " "describeNodeType, addNode, removeNode, connectNodes, setNodeParameter, " + "listUpstreamPaths, bindNodeParameter, " "autoLayoutWorkflow, validateGraph." "\n Workflow lifecycle: createWorkflow (new empty workflow), " "updateWorkflowMetadata (rename / change description / tags / activate), " @@ -844,6 +888,8 @@ async def _runEditorAgent( "description, sane defaults, or — for required user-connection fields — " "an actual connectionId). Do NOT pass position; the layout step handles it." "\n6. connectNodes — wire the nodes consistent with port schemas from describeNodeType." + "\n6b. When a parameter must take data from an upstream node, call listUpstreamPaths(nodeId=target) " + "then bindNodeParameter(producerNodeId, path, parameterName) — do not rely on implicit wire fill." "\n7. autoLayoutWorkflow — call exactly once as the LAST graph-mutating step so the " "canvas shows a readable top-down layout instead of overlapping boxes." "\n8. validateGraph — sanity check, then answer the user." diff --git a/modules/features/graphicalEditor/upstreamPathsService.py b/modules/features/graphicalEditor/upstreamPathsService.py new file mode 100644 index 00000000..8075fd00 --- /dev/null +++ b/modules/features/graphicalEditor/upstreamPathsService.py @@ -0,0 +1,128 @@ +# Copyright (c) 2025 Patrick Motsch +"""Compute pickable upstream paths for DataPicker / AI workflow tools.""" +from __future__ import annotations + +from typing import Any, Dict, List, Set + +from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES +from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, PortSchema, parse_graph_defined_output_schema +from modules.workflows.automation2.graphUtils import buildConnectionMap + +_NODE_BY_TYPE = {n["id"]: n for n in STATIC_NODE_TYPES} + + +def _paths_for_port_schema(schema: PortSchema, producer_node_id: str) -> List[Dict[str, Any]]: + out: List[Dict[str, Any]] = [] + for field in schema.fields: + path = [field.name] + out.append( + { + "producerNodeId": producer_node_id, + "path": path, + "type": field.type, + "label": ".".join(str(p) for p in path), + "scopeOrigin": "data", + } + ) + out.append( + { + "producerNodeId": producer_node_id, + "path": [], + "type": schema.name, + "label": "(whole output)", + "scopeOrigin": "data", + } + ) + return out + + +def _paths_for_schema(schema_name: str, producer_node_id: str) -> List[Dict[str, Any]]: + if not schema_name or schema_name == "Transit": + return [] + schema = PORT_TYPE_CATALOG.get(schema_name) + if not schema: + return [] + return _paths_for_port_schema(schema, producer_node_id) + + +def compute_upstream_paths(graph: Dict[str, Any], target_node_id: str) -> List[Dict[str, Any]]: + """ + Return flattened first-level paths for every ancestor node's primary output schema. + """ + nodes = graph.get("nodes") or [] + connections = graph.get("connections") or [] + node_by_id = {n["id"]: n for n in nodes if n.get("id")} + if target_node_id not in node_by_id: + return [] + + conn_map = buildConnectionMap(connections) + # predecessors: walk backwards along edges (target -> source) + preds: Dict[str, Set[str]] = {} + for tgt, pairs in conn_map.items(): + for src, _, _ in pairs: + preds.setdefault(tgt, set()).add(src) + + seen: Set[str] = set() + stack = [target_node_id] + ancestors: Set[str] = set() + while stack: + cur = stack.pop() + for p in preds.get(cur, ()): + if p not in seen: + seen.add(p) + ancestors.add(p) + stack.append(p) + + paths: List[Dict[str, Any]] = [] + for aid in sorted(ancestors): + anode = node_by_id.get(aid) + if not anode: + continue + nt = anode.get("type", "") + ndef = _NODE_BY_TYPE.get(nt) + if not ndef: + continue + out0 = (ndef.get("outputPorts") or {}).get(0, {}) + derived = parse_graph_defined_output_schema(anode, out0 if isinstance(out0, dict) else {}) + if derived: + for entry in _paths_for_port_schema(derived, aid): + entry["producerLabel"] = (anode.get("title") or "").strip() or aid + paths.append(entry) + else: + raw_schema = out0.get("schema") if isinstance(out0, dict) else None + schema_name = raw_schema if isinstance(raw_schema, str) and raw_schema else "ActionResult" + for entry in _paths_for_schema(schema_name, aid): + entry["producerLabel"] = (anode.get("title") or "").strip() or aid + paths.append(entry) + + # Lexical loop hints (flow.loop): any loop node in ancestors adds synthetic paths + for aid in ancestors: + anode = node_by_id.get(aid) or {} + if anode.get("type") == "flow.loop": + paths.extend( + [ + { + "producerNodeId": aid, + "path": ["currentItem"], + "type": "Any", + "label": "loop.currentItem", + "scopeOrigin": "loop", + }, + { + "producerNodeId": aid, + "path": ["currentIndex"], + "type": "int", + "label": "loop.currentIndex", + "scopeOrigin": "loop", + }, + { + "producerNodeId": aid, + "path": ["count"], + "type": "int", + "label": "loop.count", + "scopeOrigin": "loop", + }, + ] + ) + + return paths diff --git a/modules/features/teamsbot/datamodelTeamsbot.py b/modules/features/teamsbot/datamodelTeamsbot.py index f19b4c6c..76c9fb83 100644 --- a/modules/features/teamsbot/datamodelTeamsbot.py +++ b/modules/features/teamsbot/datamodelTeamsbot.py @@ -4,7 +4,8 @@ Teamsbot Feature - Data Models. Pydantic models for Teams Bot sessions, transcripts, bot responses, and configuration. """ -from typing import Optional, List, Dict, Any +from typing import Optional, List, Dict, Any, Literal +from datetime import datetime, timezone from pydantic import BaseModel, Field from enum import Enum import uuid @@ -12,6 +13,14 @@ import uuid from modules.datamodels.datamodelBase import PowerOnModel +# ============================================================================ +# Director Prompt Limits +# ============================================================================ + +DIRECTOR_PROMPT_TEXT_LIMIT = 8000 +DIRECTOR_PROMPT_FILE_LIMIT = 10 + + # ============================================================================ # Enums # ============================================================================ @@ -267,6 +276,56 @@ class SpeechTeamsResponse(BaseModel): reasoning: str = Field(default="", description="Reasoning for the decision (for logging/debug)") detectedIntent: str = Field(default="none", description="Detected intent: addressed, question, proactive, stop, none") commands: Optional[List[TeamsbotCommand]] = Field(default=None, description="Optional list of commands to execute (e.g. toggle transcript, send chat, change language)") + needsAgent: bool = Field(default=False, description="If True, escalate to agentService.runAgent for complex multi-step processing (web research, mail, etc.)") + agentReason: Optional[str] = Field(default=None, description="Why escalation to the full agent is required (used as task brief for the agent)") + + +# ============================================================================ +# Director Prompts (private operator instructions sent during a live meeting) +# ============================================================================ + +class TeamsbotDirectorPromptStatus(str, Enum): + """Lifecycle status of a Director Prompt.""" + QUEUED = "queued" + RUNNING = "running" + SUCCEEDED = "succeeded" + FAILED = "failed" + CONSUMED = "consumed" # one-shot consumed; persistent prompts stay active + + +class TeamsbotDirectorPromptMode(str, Enum): + """How long a Director Prompt remains effective.""" + ONE_SHOT = "oneShot" + PERSISTENT = "persistent" + + +class TeamsbotDirectorPrompt(PowerOnModel): + """A private operator instruction injected into the bot during a live meeting. + + Stored in PostgreSQL so it survives reconnects (persistent prompts) and is + auditable. Visible only to the session owner via SSE; invisible to other + meeting participants. + """ + id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Director prompt ID") + sessionId: str = Field(description="Teams Bot session ID (FK)") + instanceId: str = Field(description="Feature instance ID (FK)") + operatorUserId: str = Field(description="User ID of the operator who issued the prompt") + text: str = Field(description="The director instruction text", max_length=DIRECTOR_PROMPT_TEXT_LIMIT) + mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent") + fileIds: List[str] = Field(default_factory=list, description="UDB-selected file/object IDs to attach as RAG context") + status: TeamsbotDirectorPromptStatus = Field(default=TeamsbotDirectorPromptStatus.QUEUED, description="Lifecycle status") + statusMessage: Optional[str] = Field(default=None, description="Optional error or status detail") + createdAt: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat(), description="ISO timestamp when created") + consumedAt: Optional[str] = Field(default=None, description="ISO timestamp when consumed (one-shot) or marked done") + agentRunId: Optional[str] = Field(default=None, description="Reference to the agent run that processed this prompt") + responseText: Optional[str] = Field(default=None, description="Final agent text delivered to the meeting") + + +class TeamsbotDirectorPromptCreateRequest(BaseModel): + """Request body for submitting a new Director Prompt.""" + text: str = Field(description="Director instruction text", min_length=1, max_length=DIRECTOR_PROMPT_TEXT_LIMIT) + mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent") + fileIds: List[str] = Field(default_factory=list, description="UDB file IDs to attach (max 10)") # ============================================================================ diff --git a/modules/features/teamsbot/interfaceFeatureTeamsbot.py b/modules/features/teamsbot/interfaceFeatureTeamsbot.py index 5395d922..2408e4cb 100644 --- a/modules/features/teamsbot/interfaceFeatureTeamsbot.py +++ b/modules/features/teamsbot/interfaceFeatureTeamsbot.py @@ -21,6 +21,9 @@ from .datamodelTeamsbot import ( TeamsbotSystemBot, TeamsbotUserSettings, TeamsbotUserAccount, + TeamsbotDirectorPrompt, + TeamsbotDirectorPromptStatus, + TeamsbotDirectorPromptMode, ) logger = logging.getLogger(__name__) @@ -114,11 +117,10 @@ class TeamsbotObjects: return self.db.recordModify(TeamsbotSession, sessionId, updates) def deleteSession(self, sessionId: str) -> bool: - """Delete a session and all related transcripts and responses.""" - # Delete related records first + """Delete a session and all related transcripts, responses and director prompts.""" self._deleteTranscriptsBySession(sessionId) self._deleteResponsesBySession(sessionId) - # Delete session + self._deletePromptsBySession(sessionId) return self.db.recordDelete(TeamsbotSession, sessionId) # ========================================================================= @@ -272,6 +274,62 @@ class TeamsbotObjects: """Delete saved MS credentials.""" return self.db.recordDelete(TeamsbotUserAccount, accountId) + # ========================================================================= + # Director Prompts (private operator instructions during a live meeting) + # ========================================================================= + + def createDirectorPrompt(self, promptData: Dict[str, Any]) -> Dict[str, Any]: + """Create a new director prompt record.""" + return self.db.recordCreate(TeamsbotDirectorPrompt, promptData) + + def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]: + """Get a single director prompt by ID.""" + records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"id": promptId}) + return records[0] if records else None + + def getDirectorPrompts(self, sessionId: str, operatorUserId: str | None = None) -> List[Dict[str, Any]]: + """Get all director prompts for a session, optionally filtered by operator.""" + recordFilter: Dict[str, Any] = {"sessionId": sessionId} + if operatorUserId: + recordFilter["operatorUserId"] = operatorUserId + records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter=recordFilter) + records.sort(key=lambda r: r.get("createdAt") or "") + return records + + def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]: + """Get persistent prompts that are still active (not consumed/failed) for a session.""" + records = self.db.getRecordset( + TeamsbotDirectorPrompt, + recordFilter={ + "sessionId": sessionId, + "mode": TeamsbotDirectorPromptMode.PERSISTENT.value, + }, + ) + terminal = { + TeamsbotDirectorPromptStatus.CONSUMED.value, + TeamsbotDirectorPromptStatus.FAILED.value, + } + active = [r for r in records if r.get("status") not in terminal] + active.sort(key=lambda r: r.get("createdAt") or "") + return active + + def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Update a director prompt (status, response text, etc.).""" + return self.db.recordModify(TeamsbotDirectorPrompt, promptId, updates) + + def deleteDirectorPrompt(self, promptId: str) -> bool: + """Delete a director prompt (e.g. when operator removes a persistent prompt).""" + return self.db.recordDelete(TeamsbotDirectorPrompt, promptId) + + def _deletePromptsBySession(self, sessionId: str) -> int: + """Delete all director prompts for a session (called from deleteSession).""" + records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"sessionId": sessionId}) + count = 0 + for record in records: + self.db.recordDelete(TeamsbotDirectorPrompt, record.get("id")) + count += 1 + return count + # ========================================================================= # Stats / Aggregation # ========================================================================= diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py index e5ed9425..37cb2d77 100644 --- a/modules/features/teamsbot/routeFeatureTeamsbot.py +++ b/modules/features/teamsbot/routeFeatureTeamsbot.py @@ -36,6 +36,11 @@ from .datamodelTeamsbot import ( TeamsbotUserAccount, TeamsbotResponseChannel, TeamsbotResponseMode, + TeamsbotDirectorPromptCreateRequest, + TeamsbotDirectorPromptMode, + TeamsbotDirectorPromptStatus, + DIRECTOR_PROMPT_FILE_LIMIT, + DIRECTOR_PROMPT_TEXT_LIMIT, ) # Import service @@ -382,7 +387,12 @@ async def streamSession( # Send initial session state yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n" - + + # Send current bot WebSocket connection state so the operator UI can + # render the live indicator without waiting for the next connect/disconnect. + from .service import getActiveService as _getActiveService + yield f"data: {json.dumps({'type': 'botConnectionState', 'data': {'connected': _getActiveService(sessionId) is not None}})}\n\n" + # Stream events eventQueue = _sessionEvents.get(sessionId) if not eventQueue: @@ -832,6 +842,132 @@ async def submitMfaCode( raise HTTPException(status_code=404, detail=routeApiMsg("No active MFA challenge for this session")) +# ========================================================================= +# Director Prompts (private operator instructions during a live meeting) +# ========================================================================= + +@router.post("/{instanceId}/sessions/{sessionId}/directorPrompts") +@limiter.limit("30/minute") +async def submitDirectorPrompt( + request: Request, + instanceId: str, + sessionId: str, + body: TeamsbotDirectorPromptCreateRequest, + context: RequestContext = Depends(getRequestContext), +): + """Submit a private director prompt to the running bot. Triggers the + full agent path (web, mail, RAG, etc.) and delivers the answer into the + meeting via TTS + chat. Only the session owner can submit prompts.""" + _validateInstanceAccess(instanceId, context) + interface = _getInterface(context, instanceId) + + session = interface.getSession(sessionId) + if not session: + raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found") + _validateSessionOwnership(session, context) + + if session.get("status") not in ( + TeamsbotSessionStatus.ACTIVE.value, + TeamsbotSessionStatus.JOINING.value, + ): + raise HTTPException(status_code=400, detail=routeApiMsg("Session is not active")) + + text = (body.text or "").strip() + if not text: + raise HTTPException(status_code=400, detail=routeApiMsg("Prompt text is required")) + if len(text) > DIRECTOR_PROMPT_TEXT_LIMIT: + raise HTTPException( + status_code=400, + detail=routeApiMsg(f"Prompt text exceeds limit of {DIRECTOR_PROMPT_TEXT_LIMIT} characters"), + ) + fileIds = list(body.fileIds or []) + if len(fileIds) > DIRECTOR_PROMPT_FILE_LIMIT: + raise HTTPException( + status_code=400, + detail=routeApiMsg(f"Too many files ({len(fileIds)}); max {DIRECTOR_PROMPT_FILE_LIMIT}"), + ) + + from .service import getActiveService + service = getActiveService(sessionId) + if not service: + raise HTTPException( + status_code=409, + detail=routeApiMsg( + "Bot is not yet live in the meeting (no WebSocket connection). " + "Wait until the bot status indicator turns green and try again." + ), + ) + + created = await service.submitDirectorPrompt( + sessionId=sessionId, + operatorUserId=str(context.user.id), + text=text, + mode=body.mode, + fileIds=fileIds, + ) + return {"prompt": created} + + +@router.get("/{instanceId}/sessions/{sessionId}/directorPrompts") +@limiter.limit("30/minute") +async def listDirectorPrompts( + request: Request, + instanceId: str, + sessionId: str, + context: RequestContext = Depends(getRequestContext), +): + """List director prompts for a session (only operator's own prompts).""" + _validateInstanceAccess(instanceId, context) + interface = _getInterface(context, instanceId) + + session = interface.getSession(sessionId) + if not session: + raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found") + _validateSessionOwnership(session, context) + + operatorUserId = None if context.isPlatformAdmin else str(context.user.id) + prompts = interface.getDirectorPrompts(sessionId, operatorUserId=operatorUserId) + return {"prompts": prompts} + + +@router.delete("/{instanceId}/sessions/{sessionId}/directorPrompts/{promptId}") +@limiter.limit("30/minute") +async def deleteDirectorPrompt( + request: Request, + instanceId: str, + sessionId: str, + promptId: str, + context: RequestContext = Depends(getRequestContext), +): + """Remove a (typically persistent) director prompt. Marks it consumed so + it no longer influences the bot. The DB record is kept for audit.""" + _validateInstanceAccess(instanceId, context) + interface = _getInterface(context, instanceId) + + session = interface.getSession(sessionId) + if not session: + raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found") + _validateSessionOwnership(session, context) + + prompt = interface.getDirectorPrompt(promptId) + if not prompt or prompt.get("sessionId") != sessionId: + raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found") + if not context.isPlatformAdmin and prompt.get("operatorUserId") != str(context.user.id): + raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found") + + from .service import getActiveService + service = getActiveService(sessionId) + if service: + await service.removePersistentPrompt(promptId) + else: + # Bot not connected: mark consumed directly + interface.updateDirectorPrompt(promptId, { + "status": TeamsbotDirectorPromptStatus.CONSUMED.value, + "statusMessage": "Removed by operator (bot offline)", + }) + return {"deleted": True, "promptId": promptId} + + # ========================================================================= # Voice Test Endpoint # ========================================================================= @@ -845,7 +981,7 @@ async def testVoice( ): """Test TTS voice with AI-generated sample text in the correct language.""" from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService + from .service import _createAiService from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum mandateId = _validateInstanceAccess(instanceId, context) @@ -856,12 +992,7 @@ async def testVoice( botName = body.get("botName", "AI Assistant") try: - # Generate test text dynamically via AI in the correct language - serviceContext = type('Ctx', (), { - 'user': context.user, 'mandateId': mandateId, - 'featureInstanceId': instanceId, 'featureCode': 'teamsbot' - })() - aiService = AiService(serviceCenter=serviceContext) + aiService = _createAiService(context.user, mandateId, instanceId) await aiService.ensureAiObjectsInitialized() aiRequest = AiCallRequest( diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py index 9e59f653..2067a7f2 100644 --- a/modules/features/teamsbot/service.py +++ b/modules/features/teamsbot/service.py @@ -7,10 +7,11 @@ Manages the audio processing pipeline: STT -> Context Buffer -> SPEECH_TEAMS -> import logging import json +import re import asyncio import time import base64 -from typing import Optional, Dict, Any, List +from typing import Optional, Dict, Any, List, Callable from fastapi import WebSocket @@ -28,13 +29,504 @@ from .datamodelTeamsbot import ( TeamsbotConfig, TeamsbotResponseMode, TeamsbotResponseChannel, + TeamsbotDetectedIntent, SpeechTeamsResponse, TeamsbotCommand, + TeamsbotDirectorPrompt, + TeamsbotDirectorPromptStatus, + TeamsbotDirectorPromptMode, + DIRECTOR_PROMPT_TEXT_LIMIT, + DIRECTOR_PROMPT_FILE_LIMIT, ) from .browserBotConnector import BrowserBotConnector logger = logging.getLogger(__name__) +# Agent run limits for director prompts / speech escalation (meeting context). +# Higher than default workspace agent: Teams research + tool chains need depth. +TEAMSBOT_AGENT_MAX_ROUNDS = 8 +TEAMSBOT_AGENT_MAX_COST_CHF = 0.12 + +# How many recent director-prompt briefings (one-shot + persistent) we keep in +# session memory so SPEECH_TEAMS triggers and speech escalation can still see +# the operator's attached files + analysis after the prompt itself was consumed. +_RECENT_DIRECTOR_BRIEFINGS_MAX = 6 + +# Quick-ack ("Moment...") UX: fire a SHORT TTS the moment the bot's name is +# detected so the speaker hears within ~1s that the bot reacted, instead of +# waiting for the full debounce + SPEECH_TEAMS + agent pipeline (~5-30s). +# Throttled per session to avoid acking every fragment of a long utterance. +_QUICK_ACK_MIN_INTERVAL_SEC = 25.0 + +# Number of phrase variants we generate per kind (rotated round-robin so back- +# to-back acks/notices don't sound identical). +_EPHEMERAL_PHRASE_VARIANTS = 4 + +# Localisation INTENTS for ephemeral phrases. Each kind describes WHAT the +# phrase should express; the actual wording is produced at runtime by the AI +# in the bot's configured language + persona. The intent text below is the +# instruction passed to the LLM (English, since it's a model directive — the +# OUTPUT will be in the configured spoken language). Add new ephemeral phrase +# kinds here, never inline string literals at the call site. +_EPHEMERAL_PHRASE_INTENTS: Dict[str, str] = { + "quickAck": ( + "Very short verbal acknowledgment (1 to 4 words) the assistant says " + "the moment its name is recognised, BEFORE it has formulated a full " + "answer. The intent is purely 'I heard you, I'm thinking' — natural, " + "conversational, never a complete sentence." + ), + "agentBusy": ( + "One short sentence (max ~12 words) the assistant says BEFORE starting " + "a longer research / tool-call task, so the audience knows the answer " + "will take a few seconds. Polite, professional, calm." + ), + "agentRound": ( + "One short sentence (max ~14 words) the assistant says BETWEEN rounds " + "of a longer agent task to signal that work is still in progress. " + "Include the placeholder tokens '{round}' and '{maxRounds}' so the " + "caller can substitute the actual numbers — e.g. 'Step {round} of " + "{maxRounds}, still working.'" + ), +} + + +def _voiceLineLooksLikeBillingOrMeta(line: str) -> bool: + """Heuristic: trailing lines that are separators or billing/usage footers.""" + s = line.strip() + if not s: + return True + lower = s.lower() + if re.match(r"^[-=*_]{3,}\s*$", s): + return True + if re.match(r"^#{1,6}\s*(usage|billing|costs?|meta|technical|statistics)\b", lower): + return True + if "chf" in lower and re.search(r"\d", s): + if re.search( + r"\b(total|usage|cost|billing|token|spent|used|price|estimate|" + r"rounds?|calls?|duration|processing\s*time|model\s*calls?)\b", + lower, + ): + return True + if "token" in lower and re.search(r"\d", s): + if re.search(r"\b(total|usage|prompt|completion)\b", lower): + return True + pl = lower.replace(" ", "") + if "progressafter" in pl and ("aicalls:" in pl or "toolcalls:" in pl): + return True + return False + + +_EMOJI_PATTERN = re.compile( + "[" + "\U0001F300-\U0001FAFF" # symbols & pictographs, emoticons, transport, supplemental + "\U00002600-\U000027BF" # misc symbols + dingbats (incl. ⚙ 🔐 🔌 ✓ ✗) + "\U0001F1E6-\U0001F1FF" # regional indicator (flags) + "\U00002B00-\U00002BFF" # arrows, geometric + "\U0001F900-\U0001F9FF" # supplemental symbols (incl. 🤖 🧠) + "\U0000FE0F" # variation selector-16 (emoji presentation) + "]+", + flags=re.UNICODE, +) + + +def _voiceFriendlyMeetingText(raw: str) -> str: + """Sanitise a chat/markdown response so it can be SPOKEN naturally. + + Aggressive cleanup — when a TTS engine reads raw markdown out loud the + listener hears "hash hash hash Zusammenfassung pipe pipe pipe", which + is unbearable in a meeting. The chat / DB / UI keep the original text; + only the audio path goes through this sanitiser. + + What we strip: + * Code fences and inline code + * Markdown emphasis (**bold**, *italic*, __bold__, _italic_) + * Markdown links → keep label + * Headings (# .. ######) + * Markdown tables (any line with two or more pipes is dropped wholesale) + * Horizontal rules (---, ***, ___ on their own line) + * Bullet markers (-, *, •, ·) and numbered list markers (1., 2)) at line start + * Emojis (full Unicode pictograph ranges + variation selector) + * Decorative trailing colons on bullet headings + * Stray pipes left over from inline tables + * Trailing billing / "maximum rounds reached" / "budget exceeded" footers + + Whitespace is then collapsed to single spaces. + """ + if not raw: + return "" + + # Trim trailing operator/billing footers BEFORE any structural rewrite + # so we don't waste effort sanitising a footer that gets dropped. + low = raw.lower() + if "maximum rounds reached" in low: + m = re.search(r"(?is)maximum\s+rounds\s+reached", raw) + if m: + head = raw[: m.start()].strip() + raw = head or ( + "Die Abklaerung brauchte mehr Schritte als vorgesehen; Details stehen im Chat." + ) + if "budget exceeded" in low: + m = re.search(r"(?is)budget\s+exceeded", raw) + if m: + head = raw[: m.start()].strip() + raw = head or "Das eingestellte Kostenlimit ist erreicht; Details stehen im Chat." + + lines = raw.strip().split("\n") + while lines and _voiceLineLooksLikeBillingOrMeta(lines[-1]): + lines.pop() + t = "\n".join(lines).strip() + if not t: + t = raw.strip() + + # 1) Strip code blocks (multi-line first, then inline) + t = re.sub(r"```[\s\S]*?```", " ", t) + t = re.sub(r"`([^`]+)`", r"\1", t) + + # 2) Drop markdown table rows (any line with two or more pipes) and the + # separator lines they come with (|---|---|). A paragraph that just + # happens to contain ONE pipe survives. + cleanedLines: List[str] = [] + for ln in t.split("\n"): + stripped = ln.strip() + if stripped.count("|") >= 2: + continue + if re.fullmatch(r"\s*\|?[\s\-:|]+\|?\s*", stripped) and "-" in stripped: + continue + cleanedLines.append(ln) + t = "\n".join(cleanedLines) + + # 3) Drop horizontal rule lines (---, ***, ___, with optional spaces) + t = re.sub(r"(?m)^\s*([-*_])\s*\1\s*\1[\s\1]*$", "", t) + + # 4) Headings: drop the leading hashes + t = re.sub(r"(?m)^\s*#{1,6}\s+", "", t) + + # 5) Bullet markers at line start — keep the content, drop the bullet + t = re.sub(r"(?m)^\s*[-*•·]\s+", "", t) + # 6) Numbered list markers at line start ("1.", "2)", "3 -") + t = re.sub(r"(?m)^\s*\d+[\.\)]\s+", "", t) + + # 7) Emphasis markers (after bullets so a "**Bold:**" heading is handled) + t = re.sub(r"\*\*([^*]+)\*\*", r"\1", t) + t = re.sub(r"\*([^*\n]+)\*", r"\1", t) + t = re.sub(r"__([^_]+)__", r"\1", t) + t = re.sub(r"(?` `{` `}` `[` `]` `(` `)` + # `_` `&` `@` `$` `%` `` -- replaced with a space so word + # boundaries are preserved. + t = re.sub(r"[*#~^=+|\\<>{}\[\]()_&@$%`]+", " ", t) + + # 10e) Drop ASCII double-quote (single quotes are legitimate apostrophes + # in contractions like "don't" / "geht's", so we keep U+0027). + t = t.replace('"', "") + + # 10f) Slash between letters/digits — TTS reads "slash". Replace with + # " or " for readability when it separates words like "und/oder". + t = re.sub(r"(?<=\w)\s*/\s*(?=\w)", " oder ", t) + # Any remaining stray slash is just whitespace. + t = t.replace("/", " ") + + # 10g) Trim multiple punctuation runs ("...!!!" → "..." / "!" / etc.) + t = re.sub(r"([\.,;:!\?])\1{1,}", r"\1", t) + # Remove orphan punctuation directly preceded by whitespace + # (common after symbol stripping: " , ", " . "). + t = re.sub(r"\s+([\.,;:!\?])", r"\1", t) + # Collapse trailing colon at end of meaningful phrase to a period for + # nicer cadence ("Was ist PowerOn:" → "Was ist PowerOn."). + t = re.sub(r":\s*$", ".", t.rstrip()) + # 10h) Collapse " :" tail of MULTI-LINE blocks the same way. + t = re.sub(r"\s+:\s*$", ":", t, flags=re.MULTILINE) + + # 11) Collapse whitespace to single spaces; protect sentence breaks by + # turning paragraph blanks into a period if the previous chunk + # didn't already terminate. + paragraphs = [p.strip() for p in re.split(r"\n\s*\n", t) if p.strip()] + rebuilt: List[str] = [] + for p in paragraphs: + p = re.sub(r"\s+", " ", p).strip() + if not p: + continue + if not re.search(r"[\.!\?\u2026:]\s*$", p): + p = p.rstrip() + "." + rebuilt.append(p) + t = " ".join(rebuilt) + t = re.sub(r"\s+", " ", t).strip() + + # If we sanitised away everything (e.g. the input was *only* a markdown + # table or a wall of pictographs) return empty — the caller (TTS / voice + # summary) treats empty as "nothing to say", which is the safe default. + # Falling back to raw markdown here would leak the very symbols we just + # spent ten passes removing. + return t + + +# Google Cloud TTS rejects single sentences that exceed ~5000 bytes. The Chirp3 +# voices are stricter: long, comma-heavy sentences (no terminating punctuation) +# also fail with "Sentence ... is too long". We chunk well below the documented +# limit AND inject sentence terminators so the synthesizer accepts every chunk. +_TTS_MAX_CHUNK_CHARS = 800 + + +def _splitTextForTts(text: str, maxChars: int = _TTS_MAX_CHUNK_CHARS) -> List[str]: + """Split a long voice line into TTS-safe chunks at sentence/paragraph boundaries. + + The result preserves order and contains no empty strings. A single + sentence longer than ``maxChars`` is hard-cut at word boundaries. + """ + cleaned = (text or "").strip() + if not cleaned: + return [] + if len(cleaned) <= maxChars: + return [cleaned] + + sentencePattern = re.compile(r"(?<=[\.!\?\u2026])\s+|\n+") + rawSentences = [s.strip() for s in sentencePattern.split(cleaned) if s and s.strip()] + if not rawSentences: + rawSentences = [cleaned] + + chunks: List[str] = [] + buffer = "" + for sentence in rawSentences: + if len(sentence) > maxChars: + if buffer: + chunks.append(buffer.strip()) + buffer = "" + words = sentence.split(" ") + current = "" + for word in words: + candidate = (current + " " + word).strip() if current else word + if len(candidate) > maxChars and current: + chunks.append(current.strip()) + current = word + else: + current = candidate + if current: + if not re.search(r"[\.!\?\u2026]\s*$", current): + current = current.rstrip() + "." + chunks.append(current.strip()) + continue + + candidate = (buffer + " " + sentence).strip() if buffer else sentence + if len(candidate) > maxChars and buffer: + chunks.append(buffer.strip()) + buffer = sentence + else: + buffer = candidate + + if buffer: + chunks.append(buffer.strip()) + + finalized: List[str] = [] + for c in chunks: + if not c: + continue + if not re.search(r"[\.!\?\u2026]\s*$", c): + c = c.rstrip() + "." + finalized.append(c) + return finalized + + +async def _speakTextChunked( + websocket: Optional[WebSocket], + voiceInterface: Any, + sessionId: str, + voiceText: str, + languageCode: str, + voiceName: Optional[str], + isCancelled: Optional[Callable[[], bool]] = None, +) -> Dict[str, Any]: + """Run TTS in chunks and dispatch each ``playAudio`` over the websocket. + + Returns ``{"success": bool, "chunks": int, "played": int, "error": Optional[str], "cancelled": bool}``. + Failure for one chunk does NOT abort the rest; partial playback still + counts as ``success=True`` so the caller can decide whether to add a chat + fallback for the missing parts. + + ``isCancelled`` is an optional zero-arg predicate the caller passes in to + signal "abort the remaining chunks". It is checked BEFORE each Google + TTS round-trip and again BEFORE each websocket send, so a stop word in + the meeting can interrupt a multi-chunk dispatch within at most one + chunk boundary instead of waiting for the whole answer to finish. + """ + chunks = _splitTextForTts(voiceText) + result: Dict[str, Any] = {"success": False, "chunks": len(chunks), "played": 0, "error": None, "cancelled": False} + if not chunks: + result["error"] = "no text" + return result + if voiceInterface is None: + result["error"] = "no voice interface" + return result + + lastError: Optional[str] = None + for idx, chunk in enumerate(chunks, start=1): + if isCancelled is not None and isCancelled(): + result["cancelled"] = True + logger.info( + f"Session {sessionId}: TTS chunk loop cancelled before chunk " + f"{idx}/{len(chunks)} (user stop or newer answer in flight)" + ) + break + try: + ttsResult = await voiceInterface.textToSpeech( + text=chunk, + languageCode=languageCode, + voiceName=voiceName, + ) + except Exception as ttsErr: # pragma: no cover - network/runtime errors + lastError = f"chunk {idx}/{len(chunks)} raised: {ttsErr}" + logger.warning(f"Session {sessionId}: TTS {lastError}") + continue + + if not isinstance(ttsResult, dict) or ttsResult.get("success") is False: + err = (ttsResult or {}).get("error", "unknown") if isinstance(ttsResult, dict) else "no result" + lastError = f"chunk {idx}/{len(chunks)} failed: {err}" + logger.warning(f"Session {sessionId}: TTS {lastError}") + continue + + audioContent = ttsResult.get("audioContent") + if not audioContent: + lastError = f"chunk {idx}/{len(chunks)} returned no audioContent" + logger.warning(f"Session {sessionId}: TTS {lastError}") + continue + + if websocket is None: + lastError = "websocket unavailable" + break + + if isCancelled is not None and isCancelled(): + result["cancelled"] = True + logger.info( + f"Session {sessionId}: TTS chunk loop cancelled before " + f"sending chunk {idx}/{len(chunks)} (audio dropped)" + ) + break + + try: + await websocket.send_text(json.dumps({ + "type": "playAudio", + "sessionId": sessionId, + "audio": { + "data": base64.b64encode( + audioContent if isinstance(audioContent, bytes) else audioContent.encode() + ).decode(), + "format": "mp3", + }, + })) + result["played"] += 1 + except Exception as wsErr: # pragma: no cover - websocket failures + lastError = f"chunk {idx}/{len(chunks)} websocket send failed: {wsErr}" + logger.warning(f"Session {sessionId}: TTS {lastError}") + break + + result["success"] = result["played"] > 0 + if lastError: + result["error"] = lastError + return result + + +def _coercePersistedDetectedIntent(raw: Optional[str]) -> tuple: + """Map free-form intent labels (e.g. agent:directorPrompt) to TeamsbotDetectedIntent + for DB persistence; return (enum, meta_suffix_or_None for reasoning).""" + if not raw or not str(raw).strip(): + return TeamsbotDetectedIntent.NONE, None + s = str(raw).strip().lower() + for member in TeamsbotDetectedIntent: + if member.value == s: + return member, None + if s.startswith("agent:"): + return TeamsbotDetectedIntent.PROACTIVE, str(raw).strip()[:120] + return TeamsbotDetectedIntent.NONE, str(raw).strip()[:120] + + +# Director prompts are PRIVATE operator instructions — they must NOT be echoed +# verbatim into the meeting. The agent is asked to start its FINAL answer with +# either ``MEETING_REPLY:`` (followed by the text actually meant for the meeting) +# or ``SILENT:`` / ``INTERNAL_ONLY:`` (followed by an internal note for the +# operator UI). Anything else → treat as silent (safe default). +_DIRECTOR_REPLY_PATTERN = re.compile( + r"^\s*(MEETING_REPLY|MEETING|REPLY|SAY|SPEAK)\s*:\s*", + re.IGNORECASE, +) +_DIRECTOR_SILENT_PATTERN = re.compile( + r"^\s*(SILENT|INTERNAL(?:_ONLY)?|NOTE|NO_MEETING_OUTPUT|ACK(?:NOWLEDGE)?)\s*:\s*", + re.IGNORECASE, +) + + +def _parseDirectorPromptFinal(finalText: str) -> Dict[str, Any]: + """Parse the agent's final answer for a director prompt. + + Returns ``{"kind": "meeting"|"silent", "meetingText": str, "internalNote": str}``. + + Default is ``silent`` so unmarked replies are NOT broadcast into the meeting. + """ + text = (finalText or "").strip() + if not text: + return {"kind": "silent", "meetingText": "", "internalNote": ""} + + meetingMatch = _DIRECTOR_REPLY_PATTERN.match(text) + if meetingMatch: + body = text[meetingMatch.end():].strip() + return {"kind": "meeting", "meetingText": body, "internalNote": ""} + + silentMatch = _DIRECTOR_SILENT_PATTERN.match(text) + if silentMatch: + body = text[silentMatch.end():].strip() + return {"kind": "silent", "meetingText": "", "internalNote": body} + + # No marker → safe default: do NOT spam the meeting with the agent's + # internal reasoning. Keep the full text as an internal note for the + # operator UI so nothing is lost. + return {"kind": "silent", "meetingText": "", "internalNote": text} + + +# ========================================================================= +# Active Service Registry (sessionId -> running TeamsbotService instance) +# +# Required so HTTP endpoints (e.g. director-prompt POST) can reach the +# TeamsbotService instance currently holding the live websocket + voice +# interface for that session, without going through the websocket loop. +# ========================================================================= +_activeServices: Dict[str, "TeamsbotService"] = {} + + +def getActiveService(sessionId: str) -> Optional["TeamsbotService"]: + """Return the running TeamsbotService for a session, or None if not active.""" + return _activeServices.get(sessionId) + # ========================================================================= # AI Service Factory (for billing-aware AI calls) @@ -65,6 +557,25 @@ async def _emitSessionEvent(sessionId: str, eventType: str, data: Any): await _sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()}) +def _normalizeGatewayHostForBotWs(host: str) -> str: + """Use IPv4 loopback for local dev WebSocket URLs passed to the Node browser-bot. + + Node on Windows often resolves ``localhost`` to ``::1`` first; Uvicorn bound to + ``0.0.0.0`` typically accepts IPv4 only, so the bot gets ``ECONNREFUSED ::1``. + """ + h = host.strip() + lower = h.lower() + if lower == "localhost": + return "127.0.0.1" + if lower.startswith("localhost:"): + return "127.0.0.1" + h[len("localhost"):] + if lower.startswith("[::1]:"): + return "127.0.0.1" + h.partition("]")[2] + if lower in ("[::1]", "::1"): + return "127.0.0.1" + return h + + class TeamsbotService: """ Pipeline Orchestrator for Teams Bot sessions. @@ -102,6 +613,75 @@ class TeamsbotService: self._pendingNameTrigger: Optional[Dict[str, Any]] = None self._followUpWindowEnd: float = 0.0 + # Quick-ack throttle (timestamp of the last short "Moment..." ack we + # spoke into the meeting). Without this guard a long sentence with + # multiple name mentions would trigger several acks in a row. + self._lastQuickAckTs: float = 0.0 + + # Session-scoped phrase pool for SHORT ephemeral utterances (quick + # acks, "checking..." notices, per-round progress). Lazily populated + # by the AI in the bot's configured language + persona — no hardcoded + # strings or hardcoded language branching anywhere downstream. Keyed + # by the kinds defined in ``_EPHEMERAL_PHRASE_INTENTS``. + # * ``self._phrasePool[kind]`` -> list of variants for that kind + # * ``self._phrasePoolIdx[kind]`` -> round-robin pointer + # Concurrent generation calls for the same kind are serialised by the + # lock so we don't spawn duplicate AI requests on a burst. + self._phrasePool: Dict[str, List[str]] = {} + self._phrasePoolIdx: Dict[str, int] = {} + self._phrasePoolLock: asyncio.Lock = asyncio.Lock() + + # Voice pipeline: a single per-session lock that serialises every TTS + # dispatch into the meeting. Without it three independent code paths + # (SPEECH_TEAMS direct answer, agent escalation final answer, and + # operator-driven director prompt) can all reach + # ``websocket.send_text({"type": "playAudio", ...})`` at the same time + # and the browser bot then plays interleaved chunks — i.e. "two bots + # talking over each other" exactly as the operator suspects. Chat + # (text) sends are NOT locked: they're cheap and can interleave fine. + self._meetingTtsLock: asyncio.Lock = asyncio.Lock() + # Generation counter incremented every time we begin producing a NEW + # meeting answer OR every time the user issues a hard stop. Any TTS + # chunk loop captures the counter value at start; before sending + # each chunk to the bot it re-checks the counter and bails out if + # it has moved on. This is what makes "Stopp" actually feel + # instantaneous: the in-flight TTS dispatch loop drops itself the + # moment the next chunk would have been sent, without waiting for + # any AI round-trip or extra Google TTS call to come back. + self._answerGenerationCounter: int = 0 + # Tracking handles for cancellable background tasks. Keeping a + # reference lets ``_cancelInFlightSpeech`` actually call + # ``task.cancel()`` instead of just hoping the task notices the + # generation counter has moved on. Cleared in the task's own + # ``finally`` block. + self._currentEscalationTask: Optional[asyncio.Task] = None + self._currentQuickAckTask: Optional[asyncio.Task] = None + # Whether an agent escalation task is in flight. Kept separate from + # ``_aiAnalysisInProgress`` (which only covers the SPEECH_TEAMS phase) + # so a new speech trigger that arrives WHILE the agent is still + # researching does not start a parallel SPEECH_TEAMS that would then + # answer at the same time as the agent. + self._agentEscalationInFlight: bool = False + + # Live transport handles for out-of-band actions (director prompts, agent escalation). + # Set in handleBotWebSocket once the bot connects; cleared on disconnect. + self._activeSessionId: Optional[str] = None + self._websocket: Optional[WebSocket] = None + self._voiceInterface = None + + # Persistent director prompts kept in memory for context injection across triggers. + # Loaded from DB on (re)connect; mutated by submit/delete director prompt routes. + self._activePersistentPrompts: List[Dict[str, Any]] = [] + + # Recent director-prompt briefings (one-shot AND persistent) — keeps the + # operator's attached files and the agent's internal analysis available + # for later SPEECH_TEAMS triggers, even after a one-shot prompt has been + # consumed. Without this pool, the bot "forgets" attached docs as soon + # as the director prompt finished, and answers later meeting questions + # ("summarize the doc") with general babble instead of the file content. + # Capped by ``_RECENT_DIRECTOR_BRIEFINGS_MAX`` to bound prompt size. + self._recentDirectorBriefings: List[Dict[str, Any]] = [] + # ========================================================================= # Session Lifecycle # ========================================================================= @@ -145,6 +725,7 @@ class TeamsbotService: # gatewayBaseUrl is passed from the route handler (derived from request.base_url) wsScheme = "wss" if gatewayBaseUrl.startswith("https") else "ws" gatewayHost = gatewayBaseUrl.replace("https://", "").replace("http://", "").rstrip("/") + gatewayHost = _normalizeGatewayHostForBotWs(gatewayHost) fullGatewayWsUrl = f"{wsScheme}://{gatewayHost}/api/teamsbot/{self.instanceId}/bot/ws/{sessionId}" hasAuth = bool(botAccountEmail and botAccountPassword) @@ -262,6 +843,41 @@ class TeamsbotService: except Exception: self._botAccountEmail = None + # Register the live service so out-of-band callers (director prompts, + # agent escalation) can deliver text/audio through this same websocket. + self._activeSessionId = sessionId + self._websocket = websocket + self._voiceInterface = voiceInterface + _activeServices[sessionId] = self + + # Notify the operator UI that the bot's WebSocket is now live so the + # director-prompt panel can enable its submit button. + try: + await _emitSessionEvent(sessionId, "botConnectionState", { + "connected": True, + "timestamp": getIsoTimestamp(), + }) + except Exception: + pass + + # Restore active persistent director prompts from DB (survives reconnects). + try: + self._activePersistentPrompts = interface.getActivePersistentPrompts(sessionId) or [] + if self._activePersistentPrompts: + logger.info( + f"Session {sessionId}: Loaded {len(self._activePersistentPrompts)} active persistent director prompt(s)" + ) + except Exception as restoreErr: + logger.warning(f"Session {sessionId}: Could not restore persistent director prompts: {restoreErr}") + self._activePersistentPrompts = [] + + # Pre-warm the ephemeral phrase pool in the background so the first + # quick-ack ("Moment...") and interim agent notice don't have to wait + # for the AI round-trip. Best-effort: if generation fails, the + # corresponding ephemeral cue is silently skipped at runtime — never + # falls back to hardcoded language strings. + asyncio.create_task(self._warmEphemeralPhrasePool(sessionId)) + logger.info(f"[WS] Handler started for session {sessionId}") try: @@ -339,83 +955,64 @@ class TeamsbotService: ) elif msgType == "voiceGreeting": + # Legacy path: older bot images send a pre-built greeting + # text. New bots use ``requestGreeting`` and let the + # Gateway own greeting generation. greetingText = message.get("text", "") greetingLang = message.get("language", self.config.language) - logger.info(f"[WS] Voice greeting: text={greetingText[:60]}..., language={greetingLang}") + logger.info( + f"[WS] Voice greeting (legacy): text={greetingText[:60]}..., language={greetingLang}" + ) if greetingText and voiceInterface: + await self._dispatchGreetingToMeeting( + sessionId=sessionId, + greetingText=greetingText, + greetingLang=greetingLang, + sendToChat=False, + interface=interface, + voiceInterface=voiceInterface, + websocket=websocket, + ) + + elif msgType == "requestGreeting": + # New path: bot just signals "I have joined" — Gateway + # generates the greeting text via AI in the configured + # language + persona, then dispatches it to BOTH the + # meeting chat (sendChatMessage command) and TTS. No + # hardcoded language strings on the bot side. + requestedLang = ( + message.get("language") or self.config.language or "" + ).strip() or "en-US" + botNameHint = ( + message.get("botName") or self.config.botName or "" + ).strip() or self.config.botName + logger.info( + f"[WS] Greeting request from bot: language={requestedLang}, name={botNameHint}" + ) + if voiceInterface: try: - await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { - "status": "requested", - "hasWebSocket": True, - "message": "Voice greeting TTS requested", - "timestamp": getIsoTimestamp(), - }) - ttsResult = await voiceInterface.textToSpeech( - text=greetingText, - languageCode=greetingLang, - voiceName=self.config.voiceId + greetingText = await self._generateGreetingText( + requestedLang ) - if ttsResult and isinstance(ttsResult, dict): - audioContent = ttsResult.get("audioContent") - if audioContent: - await websocket.send_text(json.dumps({ - "type": "playAudio", - "sessionId": sessionId, - "audio": { - "data": base64.b64encode(audioContent if isinstance(audioContent, bytes) else audioContent.encode()).decode(), - "format": "mp3", - } - })) - logger.info(f"Voice greeting TTS sent for session {sessionId}") - await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { - "status": "dispatched", - "hasWebSocket": True, - "message": "Voice greeting TTS dispatched to bot", - "timestamp": getIsoTimestamp(), - }) - - greetingTranscriptData = TeamsbotTranscript( + except Exception as genErr: + logger.warning( + f"Greeting generation failed for session {sessionId}: {genErr}" + ) + greetingText = "" + if greetingText: + await self._dispatchGreetingToMeeting( sessionId=sessionId, - speaker=self.config.botName, - text=greetingText, - timestamp=getIsoTimestamp(), - confidence=1.0, - language=greetingLang, - isFinal=True, - source="botResponse", - ).model_dump() - greetingTranscript = interface.createTranscript(greetingTranscriptData) - - self._contextBuffer.append({ - "speaker": self.config.botName, - "text": greetingText, - "timestamp": getUtcTimestamp(), - "source": "botResponse", - }) - self._lastTranscriptSpeaker = self.config.botName - self._lastTranscriptText = greetingText - self._lastTranscriptId = greetingTranscript.get("id") - - await _emitSessionEvent(sessionId, "botResponse", { - "id": greetingTranscript.get("id"), - "responseText": greetingText, - "responseType": TeamsbotResponseType.AUDIO.value, - "detectedIntent": "greeting", - "reasoning": "Automatic join greeting", - "timestamp": getIsoTimestamp(), - }) - await _emitSessionEvent(sessionId, "transcript", { - "id": greetingTranscript.get("id"), - "speaker": self.config.botName, - "text": greetingText, - "confidence": 1.0, - "timestamp": getIsoTimestamp(), - "isContinuation": False, - "source": "botResponse", - "speakerResolvedFromHint": False, - }) - except Exception as ttsErr: - logger.warning(f"Voice greeting TTS failed for session {sessionId}: {ttsErr}") + greetingText=greetingText, + greetingLang=requestedLang, + sendToChat=True, + interface=interface, + voiceInterface=voiceInterface, + websocket=websocket, + ) + else: + logger.warning( + f"Session {sessionId}: Skipping greeting — AI generation produced no text" + ) elif msgType == "ping": await websocket.send_text(json.dumps({"type": "pong"})) @@ -516,6 +1113,19 @@ class TeamsbotService: except Exception as e: if "disconnect" not in str(e).lower(): logger.error(f"[WS] Error for session {sessionId}: {type(e).__name__}: {e}") + finally: + if _activeServices.get(sessionId) is self: + _activeServices.pop(sessionId, None) + self._websocket = None + self._voiceInterface = None + self._activeSessionId = None + try: + await _emitSessionEvent(sessionId, "botConnectionState", { + "connected": False, + "timestamp": getIsoTimestamp(), + }) + except Exception: + pass logger.info(f"[WS] Handler ended for session {sessionId} after {msgCount} messages") @@ -723,6 +1333,12 @@ class TeamsbotService: if isNew: logger.info(f"Session {sessionId}: Bot name in caption, debounce trigger started") asyncio.create_task(self._checkPendingNameTrigger()) + # Fire a short audible "Moment..." in parallel so the + # speaker hears the bot react immediately, instead of + # waiting for debounce + SPEECH_TEAMS + agent (~5-30s). + self._currentQuickAckTask = asyncio.create_task( + self._runQuickAck(sessionId) + ) return # Chat history: messages sent before the bot joined the meeting. @@ -854,10 +1470,23 @@ class TeamsbotService: if source == "chat" and isBotSpeaker: return - # Stop phrases: trigger immediately without debounce (root cause: 3s debounce delayed stop) + # Stop phrases: HARD STOP, no AI round-trip. We previously routed + # this through ``_analyzeAndRespond`` which spent 1-2 seconds in + # the speech LLM just to classify the intent, during which the + # current TTS kept playing — and the LLM round-trip would also + # produce yet another response that joined the queue. The new + # path goes straight to the browser bot's audio cancel and + # invalidates everything else in flight. if self._isStopPhrase(text): - logger.info(f"Session {sessionId}: Stop phrase detected, triggering analysis immediately") - await self._analyzeAndRespond(sessionId, interface, voiceInterface, websocket, createdTranscript) + logger.info( + f"Session {sessionId}: Stop phrase detected ('{text.strip()[:60]}'), " + f"hard-cancelling in-flight speech immediately" + ) + await self._cancelInFlightSpeech( + sessionId=sessionId, + websocket=websocket, + reason="userStopPhrase", + ) return # Update activity for any pending debounced trigger @@ -869,6 +1498,12 @@ class TeamsbotService: isNew = self._setPendingNameTrigger(sessionId, interface, voiceInterface, websocket, createdTranscript) if isNew: asyncio.create_task(self._checkPendingNameTrigger()) + # Audible early-feedback ack ("Moment...") in parallel — runs + # while we still wait the debounce window and SPEECH_TEAMS + # decides what to actually answer. + self._currentQuickAckTask = asyncio.create_task( + self._runQuickAck(sessionId) + ) return # Follow-up window: after a bot response, trigger AI for any human speech @@ -938,19 +1573,128 @@ class TeamsbotService: return False def _isStopPhrase(self, text: str) -> bool: - """Check if text is a stop command (stop, halt, be quiet, etc.). Triggers immediate analysis.""" + """Check if text is an immediate-cancel command from the meeting. + + Recognised intents (any language we hear in practice): + * Hard stop: stop / stopp / halt / ruhe / stille / arrete / quiet / shut + * Pause / wait: warte / wait / moment / pause / hold (hold on) + * Silence: sei still / be quiet / shut up / aufhoeren / aufhören / silence + Hits trigger the direct stop pipeline in ``_cancelInFlightSpeech``: + kill TTS, invalidate pending generations, clear name-trigger debounce. + Critically: NO new AI call is fired — the user explicitly asked the + bot to be quiet, so the worst thing we could do is generate yet + another response on top of the one we just cancelled. + """ if not text or len(text.strip()) < 2: return False t = text.strip().lower() words = [w.strip(".,!?:;\"'()[]") for w in t.split() if w.strip()] wordSet = set(words) - stopWords = {"stop", "stopp", "halt", "ruhe", "stille", "schweig", "arrete", "quiet", "shut"} + stopWords = { + # Hard-stop verbs + "stop", "stopp", "halt", "ruhe", "stille", "schweig", + "arrete", "quiet", "shut", "silence", + # Pause / wait verbs (still "be quiet now" semantics) + "warte", "wait", "moment", "pause", + } if wordSet & stopWords: return True - if "sei still" in t or "be quiet" in t or "shut up" in t or "aufhoeren" in t or "aufhören" in t: + if ( + "sei still" in t + or "be quiet" in t + or "shut up" in t + or "hold on" in t + or "aufhoeren" in t + or "aufhören" in t + ): return True return False + def _makeAnswerCancelHook(self) -> Callable[[], bool]: + """Capture the current ``_answerGenerationCounter`` and return a + zero-arg predicate that returns ``True`` once a hard stop (or any + future "supersede this answer" event) has bumped the counter. + + Pass the returned predicate as ``isCancelled`` into + ``_speakTextChunked`` so a multi-chunk dispatch can bail out + between chunks instead of speaking a 30-second answer to the end. + """ + snapshot = self._answerGenerationCounter + return lambda: self._answerGenerationCounter != snapshot + + async def _cancelInFlightSpeech( + self, + sessionId: str, + websocket: Optional[WebSocket], + reason: str, + ) -> None: + """Hard stop everything the bot is currently doing in the meeting. + + Pipeline (ALL synchronous from the caller's point of view, no AI + round-trips): + + 1. Bump ``_answerGenerationCounter`` so any in-flight TTS chunk + loop, agent escalation or quick-ack drops its remaining work + the moment it next checks the counter. + 2. Clear ``_pendingNameTrigger`` so a debounced "speaker just said + the bot name" trigger that was queued before the stop word + cannot wake up 3 seconds later and answer anyway. + 3. Cancel tracked background tasks (escalation, quick-ack). The + tasks themselves swallow ``CancelledError`` in their finally + block. + 4. Send ``{"type":"stopAudio"}`` to the browser bot — it stops the + current playback in the AudioContext and clears its play queue + so nothing buffered comes through afterwards. + + Deliberately does NOT generate a new response. The user just told + the bot to be quiet; producing a "Okay, ich bin still" reply on + top would be the exact opposite of what was asked for. + """ + self._answerGenerationCounter += 1 + gen = self._answerGenerationCounter + logger.info( + f"Session {sessionId}: Cancelling in-flight speech " + f"(reason={reason}, gen={gen})" + ) + + if self._pendingNameTrigger: + logger.info( + f"Session {sessionId}: Dropping pending debounced name " + f"trigger (was queued before stop)" + ) + self._pendingNameTrigger = None + + for taskAttr in ("_currentEscalationTask", "_currentQuickAckTask"): + task = getattr(self, taskAttr, None) + if task is not None and not task.done(): + logger.info( + f"Session {sessionId}: Cancelling background task " + f"{taskAttr}" + ) + task.cancel() + + if websocket is not None: + try: + await websocket.send_text(json.dumps({ + "type": "stopAudio", + "sessionId": sessionId, + "reason": reason, + })) + except Exception as stopErr: + logger.warning( + f"Session {sessionId}: Failed to send stopAudio to " + f"browser bot: {stopErr}" + ) + + try: + await _emitSessionEvent(sessionId, "speechCancelled", { + "reason": reason, + "generation": gen, + "timestamp": getIsoTimestamp(), + }) + except Exception: + pass + def _detectBotName(self, text: str) -> bool: """Check if text contains the bot's name (exact or phonetically similar).""" botNameLower = self.config.botName.lower() @@ -990,6 +1734,376 @@ class TeamsbotService: } return True + async def _warmEphemeralPhrasePool(self, sessionId: str) -> None: + """Fire-and-forget background task: generate the ephemeral phrase + pool for every kind defined in ``_EPHEMERAL_PHRASE_INTENTS`` so the + first quick-ack / interim notice doesn't pay the AI round-trip + latency at runtime. Failures are logged but never raised — the + runtime selectors handle empty pools by silently skipping the cue.""" + try: + for kind in _EPHEMERAL_PHRASE_INTENTS: + try: + await self._getEphemeralPhrases(kind) + except Exception as innerErr: + logger.warning( + f"Session {sessionId}: Phrase pool warmup failed for " + f"kind={kind}: {innerErr}" + ) + except Exception as warmErr: + logger.warning( + f"Session {sessionId}: Phrase pool warmup task crashed: {warmErr}" + ) + + # ---------------------------------------------------------------- Voice + # When the bot's full answer is a long structured chat post (markdown + # tables, bullet lists, headings, multi-paragraph) we MUST NOT read it + # out verbatim into the meeting — even after sanitisation it sounds + # like a wall of text and easily takes 5+ minutes. The chat keeps the + # full answer; the audio path goes through ``_summarizeForVoice`` which + # asks the AI for a 1-3 sentence spoken paraphrase in the configured + # bot persona / language. + + # Threshold: anything longer than this many characters (after sanitise) + # OR any answer whose source contains markdown structure (tables / + # multiple bullets / multiple headings) gets condensed before TTS. + _VOICE_DIRECT_MAX_CHARS = 600 + _VOICE_SUMMARY_MAX_CHARS = 350 + + @staticmethod + def _looksLikeStructuredText(raw: str) -> bool: + """Heuristic: does the original answer have markdown structure that + would be miserable to listen to verbatim? Used to trigger the + AI summary path even when the sanitised text is short enough.""" + if not raw: + return False + if raw.count("|") >= 4: # at least one markdown table row + return True + if raw.count("\n#") >= 1: # at least one heading after newline + return True + if raw.count("\n- ") + raw.count("\n* ") + raw.count("\n• ") >= 3: + return True # 3+ bullets → list-like + if re.search(r"\n\d+[\.\)]\s", raw): # numbered list + count = len(re.findall(r"(?m)^\s*\d+[\.\)]\s", raw)) + if count >= 3: + return True + return False + + async def _summarizeForVoice( + self, + sessionId: str, + rawAnswer: str, + ) -> str: + """Return a SHORT, naturally-spoken paraphrase of ``rawAnswer`` for + TTS playback. Falls back to the sanitised + truncated original if + the AI call fails — never blocks the response. + + The chat / DB / UI keep the original ``rawAnswer`` untouched. Only + the voice channel goes through this condensation. + """ + if not rawAnswer or not rawAnswer.strip(): + return "" + + sanitised = _voiceFriendlyMeetingText(rawAnswer) + # Short + unstructured → speak as-is, no AI round-trip + if ( + len(sanitised) <= self._VOICE_DIRECT_MAX_CHARS + and not self._looksLikeStructuredText(rawAnswer) + ): + return sanitised + + targetLang = (self.config.language or "de-DE").strip() + botName = (self.config.botName or "").strip() or "the assistant" + persona = (self.config.aiSystemPrompt or "").strip() + personaBlock = ( + f"\n\nBOT PERSONA / TONE:\n{persona}\n" + if persona else "" + ) + + prompt = ( + f"You are condensing a long written answer into a SHORT spoken " + f"paraphrase that the assistant '{botName}' will say out loud " + f"into a Microsoft Teams meeting. The full written answer is " + f"already in the meeting chat — your job is to summarise it for " + f"the EAR, not the eye.\n\n" + f"STRICT REQUIREMENTS:\n" + f"1. Output language: BCP-47 '{targetLang}'. No other language.\n" + f"2. 1 to 3 sentences, max ~{self._VOICE_SUMMARY_MAX_CHARS} characters total.\n" + f"3. Natural spoken style — no headings, no bullet points, no " + f"tables, no markdown, no emojis, no enumerations like 'Erstens... " + f"Zweitens...' unless that genuinely flows in speech.\n" + f"4. Capture the essence and the most important conclusion. Do " + f"NOT try to fit every detail. Listeners can read the chat for " + f"the full version.\n" + f"5. End by gently pointing the audience to the chat for details, " + f"e.g. 'Details stehen im Chat.' (adapted to the target language).\n" + f"6. Output ONLY the spoken text. No JSON, no quotes around it, " + f"no preamble like 'Here is the summary:'.\n" + f"{personaBlock}\n" + f"FULL WRITTEN ANSWER (markdown-formatted, sometimes long):\n" + f"---\n{rawAnswer.strip()[:6000]}\n---\n" + ) + + try: + aiService = _createAiService( + self.currentUser, self.mandateId, self.instanceId + ) + await aiService.ensureAiObjectsInitialized() + request = AiCallRequest( + prompt=prompt, + context="", + options=AiCallOptions( + operationType=OperationTypeEnum.DATA_ANALYSE, + priority=PriorityEnum.SPEED, + ), + ) + response = await aiService.callAi(request) + except Exception as aiErr: + logger.warning( + f"Session {sessionId}: Voice summary AI call failed: {aiErr}" + ) + return sanitised[: self._VOICE_DIRECT_MAX_CHARS] + + if not response or response.errorCount != 0 or not response.content: + logger.warning( + f"Session {sessionId}: Voice summary returned empty/error" + ) + return sanitised[: self._VOICE_DIRECT_MAX_CHARS] + + spoken = response.content.strip() + # Defensive sanitiser pass — the model usually obeys the + # "no markdown" instruction but not always. + spoken = _voiceFriendlyMeetingText(spoken) + if not spoken: + return sanitised[: self._VOICE_DIRECT_MAX_CHARS] + + logger.info( + f"Session {sessionId}: Voice summary generated " + f"(orig={len(rawAnswer)} chars, sanitised={len(sanitised)}, " + f"spoken={len(spoken)})" + ) + return spoken + + async def _pickQuickAckText(self) -> Optional[str]: + """Return a short ack text in the bot's configured language. The + actual phrases are AI-generated once per session (cached) and rotated + round-robin so consecutive acks don't sound identical. Returns + ``None`` only if AI generation completely failed and no fallback + variant could be produced — in that case the caller silently skips + the ack.""" + return await self._pickEphemeralPhrase("quickAck") + + async def _pickEphemeralPhrase( + self, + kind: str, + substitutions: Optional[Dict[str, Any]] = None, + ) -> Optional[str]: + """Round-robin selector over the cached phrase pool for ``kind``. + Lazily generates the pool on first use. ``substitutions`` is applied + to the chosen phrase via ``str.format(**substitutions)`` so kinds + like ``agentRound`` can render ``{round}`` / ``{maxRounds}``. + Returns ``None`` if no phrases are available.""" + variants = await self._getEphemeralPhrases(kind) + if not variants: + return None + idx = self._phrasePoolIdx.get(kind, 0) % len(variants) + self._phrasePoolIdx[kind] = (idx + 1) % len(variants) + chosen = variants[idx] + if substitutions: + try: + chosen = chosen.format(**substitutions) + except (KeyError, IndexError, ValueError) as fmtErr: + # The AI didn't include the expected placeholder — return the + # raw phrase rather than crash. The user still hears something + # in the right language; only the numeric hint is missing. + logger.debug( + f"Ephemeral phrase substitution failed for kind={kind}: {fmtErr}" + ) + return chosen + + async def _getEphemeralPhrases(self, kind: str) -> List[str]: + """Return the cached pool of AI-generated variants for ``kind``, + generating it on first request. Subsequent calls hit the in-memory + cache. Concurrent first-time callers are serialised by the pool lock + so only ONE AI request is fired per kind per session.""" + cached = self._phrasePool.get(kind) + if cached: + return cached + async with self._phrasePoolLock: + cached = self._phrasePool.get(kind) + if cached: + return cached + phrases = await self._generateEphemeralPhrases( + kind, _EPHEMERAL_PHRASE_VARIANTS + ) + if phrases: + self._phrasePool[kind] = phrases + return phrases + + async def _generateEphemeralPhrases( + self, kind: str, count: int + ) -> List[str]: + """Ask the AI to produce ``count`` short utterances for ``kind`` in + the bot's configured language and persona. Returns ``[]`` on any + failure — callers must treat empty as 'silently skip this ephemeral + cue', NEVER fall back to a hardcoded localized string.""" + intent = _EPHEMERAL_PHRASE_INTENTS.get(kind) + if not intent: + logger.warning(f"Unknown ephemeral phrase kind requested: {kind}") + return [] + + targetLang = (self.config.language or "").strip() or "en-US" + botName = (self.config.botName or "the assistant").strip() + persona = (self.config.aiSystemPrompt or "").strip() + + # The prompt is in English on purpose — these are instructions to the + # LLM, not user-facing text. The OUTPUT is required to be in + # ``targetLang``. We ask for a strict JSON array so parsing is robust. + prompt = ( + f"You are localizing short SPOKEN-LANGUAGE utterances for a " + f"meeting assistant named '{botName}'.\n\n" + f"Persona / style guide for the assistant:\n" + f"{persona or '(no persona configured — use a neutral, polite, professional tone)'}\n\n" + f"Target spoken language (BCP-47 code): {targetLang}\n\n" + f"Utterance intent:\n{intent}\n\n" + f"Generate {count} DIFFERENT variants matching this intent, in " + f"the target language. Variants should feel natural when spoken " + f"aloud, not robotic. Do NOT include the assistant's name in " + f"the variants.\n\n" + f"Output STRICTLY a JSON array of {count} plain-text strings, " + f"with no markdown fences, no commentary, no surrounding " + f"quotation marks beyond the JSON syntax itself. Example " + f"format: [\"...\", \"...\", \"...\", \"...\"]" + ) + + try: + aiService = _createAiService( + self.currentUser, self.mandateId, self.instanceId + ) + await aiService.ensureAiObjectsInitialized() + request = AiCallRequest( + prompt=prompt, + context="", + options=AiCallOptions( + operationType=OperationTypeEnum.DATA_ANALYSE, + priority=PriorityEnum.SPEED, + ), + ) + response = await aiService.callAi(request) + except Exception as aiErr: + logger.warning( + f"Ephemeral phrase generation failed (kind={kind}, lang={targetLang}): {aiErr}" + ) + return [] + + if not response or response.errorCount != 0 or not response.content: + logger.warning( + f"Ephemeral phrase generation returned empty/error " + f"(kind={kind}, lang={targetLang})" + ) + return [] + + raw = response.content.strip() + # Strip optional ```json ... ``` fences before parsing. + raw = re.sub(r"^```(?:json)?\s*", "", raw) + raw = re.sub(r"\s*```\s*$", "", raw) + try: + arr = json.loads(raw) + except json.JSONDecodeError as parseErr: + logger.warning( + f"Ephemeral phrase generation: could not parse JSON " + f"(kind={kind}, lang={targetLang}): {parseErr} " + f"raw={raw[:200]}" + ) + return [] + if not isinstance(arr, list): + return [] + cleaned = [ + str(v).strip() + for v in arr + if isinstance(v, str) and str(v).strip() + ] + cleaned = cleaned[:count] + if cleaned: + logger.info( + f"Ephemeral phrase pool generated (kind={kind}, " + f"lang={targetLang}, count={len(cleaned)})" + ) + return cleaned + + def _shouldFireQuickAck(self) -> bool: + """Centralized gate so the call sites stay short and consistent.""" + now = time.time() + if (now - self._lastQuickAckTs) < _QUICK_ACK_MIN_INTERVAL_SEC: + return False + # If we are already producing a real response, the ack would step on + # the actual answer's TTS — skip it. Same for an in-flight agent + # escalation: the agent will deliver its own answer (and we already + # spoke an interim "moment please" when it started). + if self._aiAnalysisInProgress or self._agentEscalationInFlight: + return False + # Voice channel must be active. Chat-only mode would just spam "...". + channelRaw = self.config.responseChannel + channelStr = ( + channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw) + ).lower().strip() + if channelStr not in ("voice", "both"): + return False + if self.config.responseMode in ( + TeamsbotResponseMode.MANUAL, + TeamsbotResponseMode.TRANSCRIBE_ONLY, + ): + return False + return True + + async def _runQuickAck(self, sessionId: str) -> None: + """Background task: speak the short ack into the meeting via TTS. + + Designed to be fired as ``asyncio.create_task(self._runQuickAck(...))`` + the moment the bot's name is detected — does not block the regular + debounced analysis pipeline. Persists nothing to the DB and emits no + botResponse event; this is purely an audio cue ("Moment...") so the + speaker hears within ~1s that the bot is reacting. + """ + websocket = self._websocket + voiceInterface = self._voiceInterface + if websocket is None or voiceInterface is None: + return + if not self._shouldFireQuickAck(): + return + ackText = await self._pickQuickAckText() + if not ackText: + return + # Mark the throttle BEFORE TTS so two near-simultaneous detections + # don't both fire (TTS dispatch can take a few hundred ms). + self._lastQuickAckTs = time.time() + try: + await _emitSessionEvent(sessionId, "quickAck", { + "text": ackText, + "timestamp": getIsoTimestamp(), + }) + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + outcome = await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=ackText, + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) + if not outcome.get("success"): + logger.info( + f"Session {sessionId}: Quick ack TTS failed silently " + f"({outcome.get('error')}) — main response will still go through" + ) + except asyncio.CancelledError: + logger.info(f"Session {sessionId}: Quick ack cancelled by stop signal") + except Exception as ackErr: + logger.warning(f"Session {sessionId}: Quick ack failed: {ackErr}") + finally: + self._currentQuickAckTask = None + async def _checkPendingNameTrigger(self, delaySec: float = 3.0): """Async loop: fire the pending name trigger once the speaker is quiet.""" await asyncio.sleep(delaySec) @@ -1032,6 +2146,19 @@ class TeamsbotService: if self._aiAnalysisInProgress: logger.info(f"Session {sessionId}: AI analysis already in progress, skipping duplicate trigger") return + # An agent escalation from a previous trigger may still be researching + # (it lives in its own task, ``_aiAnalysisInProgress`` was already + # released when SPEECH_TEAMS returned). If we let a fresh SPEECH_TEAMS + # run now, both pipelines would race to the meeting voice channel and + # the operator would hear "two bots talking". Skip until the agent + # finishes; the speaker can re-trigger by saying the bot name again + # if they have a new question. + if self._agentEscalationInFlight: + logger.info( + f"Session {sessionId}: Agent escalation still in flight — " + f"skipping new SPEECH_TEAMS trigger to prevent overlapping replies" + ) + return self._aiAnalysisInProgress = True self._lastAiCallTime = time.time() @@ -1060,7 +2187,11 @@ class TeamsbotService: if self._contextSummary: summaryStr = f"\nEARLIER_CONVERSATION_SUMMARY:\n{self._contextSummary}\n" - transcriptContext = f"BOT_NAME:{self.config.botName}{sessionContextStr}{summaryStr}\nRECENT_TRANSCRIPT:\n" + "\n".join(contextLines) + # Persistent director prompts: private operator instructions that stay + # in effect across triggers (e.g. "respond in English", "always be brief"). + directorStr = self._buildPersistentDirectorContext() + + transcriptContext = f"BOT_NAME:{self.config.botName}{sessionContextStr}{summaryStr}{directorStr}\nRECENT_TRANSCRIPT:\n" + "\n".join(contextLines) # Call SPEECH_TEAMS try: @@ -1112,8 +2243,68 @@ class TeamsbotService: "modelName": response.modelName, "processingTime": response.processingTime, "priceCHF": response.priceCHF, + "needsAgent": speechResult.needsAgent, + "agentReason": speechResult.agentReason, }) + # Hybrid routing: SPEECH_TEAMS detected a complex request that + # requires the full agent (web research, mail, multi-step). Hand + # off to the agent path; do NOT speak the SPEECH_TEAMS placeholder. + if speechResult.needsAgent: + # Director prompts (persistent + recent one-shot) have already + # delivered files to the operator. The escalation agent MUST see + # them — otherwise it answers "summarize the doc" with general + # babble because the SPEECH_TEAMS prompt itself never had file + # access. We also forward the prior agent analysis so the + # escalation can build on, not duplicate, the earlier work. + briefings = self._collectActiveDirectorBriefings() + briefingFileIds = self._collectDirectorFileIds() + briefingBlock = "" + if briefings: + parts = [] + for b in briefings: + seg = f"- ({b.get('mode')}) {b.get('text', '')}".rstrip() + if b.get("fileIds"): + seg += f"\n attachedFileIds: {', '.join(b['fileIds'])}" + if b.get("note"): + note = b["note"] + seg += ( + "\n priorAgentAnalysis: " + + (note if len(note) <= 800 else note[:800] + "...") + ) + parts.append(seg) + briefingBlock = ( + "\n\nACTIVE_OPERATOR_BRIEFINGS (private; you may read the " + "attached files via summarizeContent / readFile / " + "readContentObjects to answer the user precisely; do NOT " + "quote the directive text itself):\n" + "\n".join(parts) + ) + logger.info( + f"Session {sessionId}: SPEECH_TEAMS escalates to agent. " + f"Reason: {speechResult.agentReason or speechResult.reasoning} | " + f"briefings={len(briefings)}, fileIds={len(briefingFileIds)}" + ) + taskBrief = ( + (speechResult.agentReason + or speechResult.responseText + or "Verarbeite die juengste Sprecheranfrage und antworte ins Meeting.") + + briefingBlock + ) + # Mark escalation as in-flight BEFORE we create the task so the + # ``_aiAnalysisInProgress=False`` released in our finally block + # cannot let a competing speech trigger sneak past the gate + # before the agent task has even been scheduled. + self._agentEscalationInFlight = True + self._currentEscalationTask = asyncio.create_task( + self._runEscalationAndRelease( + sessionId=sessionId, + taskBrief=taskBrief, + briefingFileIds=briefingFileIds, + triggerTranscriptId=triggerTranscript.get("id"), + ) + ) + return + # Step 4a: Handle STOP intent -- stop audio immediately if speechResult.detectedIntent == "stop": logger.info(f"Session {sessionId}: AI detected STOP intent: {speechResult.reasoning}") @@ -1190,70 +2381,69 @@ class TeamsbotService: textForChat = speechResult.responseTextForChat or speechResult.responseText storedText = textForChat or textForVoice or speechResult.responseText - # 4a: Voice response (TTS -> Audio to bot) + # 4a: Voice response (TTS -> Audio to bot, chunked for long replies) if sendVoice and textForVoice: - try: + await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { + "status": "requested", + "hasWebSocket": websocket is not None, + "message": "TTS generation requested", + "timestamp": getIsoTimestamp(), + }) + logger.info( + f"Session {sessionId}: TTS requested (websocket_available={websocket is not None})" + ) + if not websocket: + logger.warning( + f"Session {sessionId}: TTS skipped (bot websocket unavailable, likely fallback mode)" + ) await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { - "status": "requested", - "hasWebSocket": websocket is not None, - "message": "TTS generation requested", + "status": "unavailable", + "hasWebSocket": False, + "message": "TTS skipped — bot websocket unavailable", "timestamp": getIsoTimestamp(), }) - logger.info( - f"Session {sessionId}: TTS requested (websocket_available={websocket is not None})" - ) - ttsResult = await voiceInterface.textToSpeech( - text=textForVoice, - languageCode=self.config.language, - voiceName=self.config.voiceId - ) - - if not ttsResult or not isinstance(ttsResult, dict): - raise RuntimeError("TTS returned invalid result payload") - - if ttsResult.get("success") is False: - raise RuntimeError(f"TTS backend error: {ttsResult.get('error', 'unknown')}") - - audioContent = ttsResult.get("audioContent") - if not audioContent: - raise RuntimeError("TTS returned no audioContent") - - if websocket: - await websocket.send_text(json.dumps({ - "type": "playAudio", - "sessionId": sessionId, - "audio": { - "data": base64.b64encode(audioContent if isinstance(audioContent, bytes) else audioContent.encode()).decode(), - "format": "mp3", - }, - })) - logger.info(f"Session {sessionId}: TTS audio dispatched to bot") + if not sendChat: + sendChat = True + else: + # Long / structured answers → AI condenses for ear; chat keeps full text. + spokenText = await self._summarizeForVoice(sessionId, textForVoice) + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + ttsOutcome = await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=spokenText, + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) + if ttsOutcome.get("success"): + logger.info( + f"Session {sessionId}: TTS audio dispatched to bot " + f"(chunks={ttsOutcome.get('chunks')}, played={ttsOutcome.get('played')})" + ) await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { "status": "dispatched", "hasWebSocket": True, - "message": "TTS audio dispatched to bot", + "chunks": ttsOutcome.get("chunks"), + "played": ttsOutcome.get("played"), "timestamp": getIsoTimestamp(), }) else: logger.warning( - f"Session {sessionId}: TTS audio generated but cannot be played (bot websocket unavailable, likely fallback mode)" + f"TTS failed for session {sessionId}: {ttsOutcome.get('error')}" ) await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { - "status": "unavailable", - "hasWebSocket": False, - "message": "TTS audio generated but bot websocket unavailable", + "status": "failed", + "hasWebSocket": True, + "chunks": ttsOutcome.get("chunks"), + "played": ttsOutcome.get("played"), + "message": ttsOutcome.get("error"), "timestamp": getIsoTimestamp(), }) - except Exception as ttsErr: - logger.warning(f"TTS failed for session {sessionId}: {ttsErr}") - await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { - "status": "failed", - "hasWebSocket": websocket is not None, - "message": str(ttsErr), - "timestamp": getIsoTimestamp(), - }) - if not sendChat: - sendChat = True # Fallback to chat if voice-only and TTS failed + if not sendChat: + sendChat = True # Fallback to chat if voice-only and TTS failed # 4b: Chat response (send text message to meeting chat) if sendChat and textForChat: @@ -1399,6 +2589,41 @@ class TeamsbotService: finally: self._aiAnalysisInProgress = False + async def _runEscalationAndRelease( + self, + sessionId: str, + taskBrief: str, + briefingFileIds: List[str], + triggerTranscriptId: Optional[str], + ) -> None: + """Background wrapper for ``_runAgentForMeeting`` that holds the + ``_agentEscalationInFlight`` flag for the entire duration of the agent + run — not just for the moment we schedule the task. Without this + wrapper, ``_aiAnalysisInProgress`` would already be ``False`` while + the agent is still researching, and a fresh SPEECH_TEAMS trigger from + a new utterance would race the agent to the voice channel.""" + try: + await self._runAgentForMeeting( + sessionId=sessionId, + taskText=taskBrief, + fileIds=briefingFileIds, + sourceLabel="speechEscalation", + triggerTranscriptId=triggerTranscriptId, + ) + except asyncio.CancelledError: + logger.info( + f"Session {sessionId}: Escalation agent task cancelled by stop signal" + ) + except Exception as escErr: + logger.error( + f"Session {sessionId}: Escalation agent task failed: " + f"{type(escErr).__name__}: {escErr}", + exc_info=True, + ) + finally: + self._agentEscalationInFlight = False + self._currentEscalationTask = None + # ========================================================================= # AI Command Execution # ========================================================================= @@ -1535,23 +2760,18 @@ class TeamsbotService: if not summary: summary = "Keine Chat-Nachrichten im angegebenen Zeitraum." if voiceInterface and websocket: - ttsResult = await voiceInterface.textToSpeech( - text=summary[:2000], - languageCode=self.config.language, - voiceName=self.config.voiceId, - ) - if ttsResult and isinstance(ttsResult, dict) and ttsResult.get("audioContent"): - audioContent = ttsResult["audioContent"] - await websocket.send_text(json.dumps({ - "type": "playAudio", - "sessionId": sessionId, - "audio": { - "data": base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode(), - "format": "mp3", - }, - })) + spokenSummary = await self._summarizeForVoice(sessionId, summary[:2000]) + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=spokenSummary, + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) async def _cmdReadAloud( self, @@ -1562,25 +2782,18 @@ class TeamsbotService: ): """Read text aloud via TTS and play in meeting.""" readText = params.get("text", "") - if readText and voiceInterface: - ttsResult = await voiceInterface.textToSpeech( - text=readText, - languageCode=self.config.language, - voiceName=self.config.voiceId, - ) - if ttsResult and isinstance(ttsResult, dict): - audioContent = ttsResult.get("audioContent") - if audioContent and websocket: - await websocket.send_text(json.dumps({ - "type": "playAudio", - "sessionId": sessionId, - "audio": { - "data": base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode(), - "format": "mp3", - }, - })) + if readText and voiceInterface and websocket: + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=_voiceFriendlyMeetingText(readText), + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) async def _cmdChangeLanguage(self, sessionId: str, params: dict): """Change bot language.""" @@ -1674,6 +2887,1041 @@ class TeamsbotService: except Exception as e: logger.warning(f"Session {sessionId}: storeDocument failed: {e}") + # ========================================================================= + # Director Prompts (private operator instructions during a live meeting) + # ========================================================================= + + def _collectActiveDirectorBriefings(self) -> List[Dict[str, Any]]: + """Return the deduplicated list of director-prompt briefings that are + currently relevant for the meeting context: every active persistent + prompt PLUS every recent one-shot prompt that still sits in the + ``_recentDirectorBriefings`` pool. Each entry carries ``text``, + ``fileIds`` (UDB attachments), ``mode``, ``promptId`` and ``note`` + (the agent's internal analysis from the SILENT director run, if any). + """ + seen: Dict[str, Dict[str, Any]] = {} + for p in self._activePersistentPrompts: + pid = p.get("id") or "" + seen[pid] = { + "promptId": pid, + "mode": p.get("mode") or "persistent", + "text": (p.get("text") or "").strip(), + "fileIds": list(p.get("fileIds") or []), + "note": (p.get("responseText") or "").strip(), + } + for b in self._recentDirectorBriefings: + pid = b.get("promptId") or "" + if pid in seen: + # Refresh note with the latest analysis if the persistent run + # produced one after the prompt was first loaded from DB. + if b.get("note"): + seen[pid]["note"] = b["note"] + continue + seen[pid] = { + "promptId": pid, + "mode": b.get("mode") or "oneShot", + "text": (b.get("text") or "").strip(), + "fileIds": list(b.get("fileIds") or []), + "note": (b.get("note") or "").strip(), + } + return [v for v in seen.values() if v.get("text") or v.get("fileIds")] + + def _collectDirectorFileIds(self) -> List[str]: + """Flat, deduplicated list of UDB file IDs attached to any currently + relevant director prompt (persistent + recent one-shot). Used when + SPEECH_TEAMS escalates to the agent so the agent can actually READ the + documents the operator already provided.""" + out: List[str] = [] + seen: set = set() + for b in self._collectActiveDirectorBriefings(): + for fid in b.get("fileIds") or []: + if fid and fid not in seen: + seen.add(fid) + out.append(fid) + return out + + def _buildPersistentDirectorContext(self) -> str: + """Render active director-prompt briefings as private operator guidance + for the SPEECH_TEAMS system prompt context block. + + Surfaces three things SPEECH_TEAMS otherwise misses: + + * the operator's directive text (as before) + * the IDs of any UDB files the operator attached — so SPEECH_TEAMS + knows the documents exist and can decide to escalate to the agent, + which has the tooling to read them. + * the agent's previous internal analysis of the prompt (the SILENT + ``MEETING_REPLY/SILENT`` decision's note), so SPEECH_TEAMS can answer + short questions without re-running the agent. + """ + briefings = self._collectActiveDirectorBriefings() + if not briefings: + return "" + lines: List[str] = [] + for b in briefings: + entry = f"- ({b.get('mode', 'persistent')}) {b.get('text', '')}".rstrip() + fileIds = b.get("fileIds") or [] + if fileIds: + entry += ( + "\n ATTACHED_FILES (operator-provided documents — the AGENT " + "has tools to read them via summarizeContent / readFile / " + "readContentObjects): " + + ", ".join(fileIds) + ) + note = b.get("note") + if note: + noteShort = note if len(note) <= 600 else note[:600] + "..." + entry += f"\n AGENT_ANALYSIS (already computed by the bot): {noteShort}" + lines.append(entry) + return ( + "\nOPERATOR_DIRECTIVES (private; never quote them verbatim, just follow them. " + "If the user asks about an attached document, use AGENT_ANALYSIS first; " + "if more depth is needed, set needsAgent=true so the agent can re-read the file):\n" + + "\n".join(lines) + + "\n" + ) + + def _recordDirectorBriefing( + self, + prompt: Dict[str, Any], + internalNote: str, + meetingText: str, + ) -> None: + """Append a director-prompt briefing to the session-scoped pool so the + attached files and the agent's analysis stay available for subsequent + SPEECH_TEAMS triggers — even after a one-shot prompt was consumed. + Idempotent per ``promptId`` (latest entry wins).""" + pid = prompt.get("id") or "" + # Drop any older entry for the same prompt so we keep the freshest note. + self._recentDirectorBriefings = [ + b for b in self._recentDirectorBriefings if b.get("promptId") != pid + ] + self._recentDirectorBriefings.append({ + "promptId": pid, + "mode": prompt.get("mode") or "oneShot", + "text": (prompt.get("text") or "").strip(), + "fileIds": list(prompt.get("fileIds") or []), + "note": (internalNote or meetingText or "").strip(), + "recordedAt": getIsoTimestamp(), + }) + if len(self._recentDirectorBriefings) > _RECENT_DIRECTOR_BRIEFINGS_MAX: + self._recentDirectorBriefings = self._recentDirectorBriefings[ + -_RECENT_DIRECTOR_BRIEFINGS_MAX: + ] + + async def submitDirectorPrompt( + self, + sessionId: str, + operatorUserId: str, + text: str, + mode: TeamsbotDirectorPromptMode, + fileIds: List[str], + ) -> Dict[str, Any]: + """Persist a new director prompt and trigger immediate agent processing. + + Returns the created prompt record. Processing happens asynchronously + and emits SSE events ('directorPrompt') for the operator UI. + """ + from . import interfaceFeatureTeamsbot as interfaceDb + + interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) + + promptData = TeamsbotDirectorPrompt( + sessionId=sessionId, + instanceId=self.instanceId, + operatorUserId=operatorUserId, + text=text, + mode=mode, + fileIds=fileIds or [], + status=TeamsbotDirectorPromptStatus.QUEUED, + ).model_dump() + created = interface.createDirectorPrompt(promptData) + + # Persistent prompts join in-memory directives immediately so they + # also influence subsequent SPEECH_TEAMS triggers, not only the + # one-shot agent run we kick off below. + if mode == TeamsbotDirectorPromptMode.PERSISTENT: + self._activePersistentPrompts.append(created) + + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": created.get("id"), + "status": created.get("status"), + "mode": created.get("mode"), + "text": created.get("text"), + "fileIds": created.get("fileIds", []), + "createdAt": created.get("createdAt"), + }) + + asyncio.create_task(self._processDirectorPrompt(created)) + return created + + async def removePersistentPrompt(self, promptId: str) -> bool: + """Remove a persistent director prompt (operator clicked 'remove').""" + from . import interfaceFeatureTeamsbot as interfaceDb + + interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) + sessionId = self._activeSessionId + prompt = interface.getDirectorPrompt(promptId) + if not prompt: + return False + interface.updateDirectorPrompt(promptId, { + "status": TeamsbotDirectorPromptStatus.CONSUMED.value, + "consumedAt": getIsoTimestamp(), + "statusMessage": "Removed by operator", + }) + self._activePersistentPrompts = [ + p for p in self._activePersistentPrompts if p.get("id") != promptId + ] + # Also drop the briefing copy so SPEECH_TEAMS forgets the doc reference + # immediately; otherwise the bot would keep "remembering" a doc the + # operator just retired. + self._recentDirectorBriefings = [ + b for b in self._recentDirectorBriefings if b.get("promptId") != promptId + ] + if sessionId: + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": promptId, + "status": TeamsbotDirectorPromptStatus.CONSUMED.value, + "mode": prompt.get("mode"), + "text": prompt.get("text"), + "removed": True, + }) + return True + + async def _processDirectorPrompt(self, prompt: Dict[str, Any]) -> None: + """Run the agent for a director prompt and deliver the FINAL text into + the meeting via TTS + chat (using the bot's existing channels).""" + from . import interfaceFeatureTeamsbot as interfaceDb + + sessionId = prompt.get("sessionId") + promptId = prompt.get("id") + interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) + + interface.updateDirectorPrompt(promptId, { + "status": TeamsbotDirectorPromptStatus.RUNNING.value, + }) + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": promptId, + "status": TeamsbotDirectorPromptStatus.RUNNING.value, + }) + + # Build a task brief for the agent that surfaces the meeting context. + recentTranscript = self._renderRecentTranscriptForAgent(maxLines=20) + directorText = (prompt.get("text") or "").strip() + attachedFileIds = list(prompt.get("fileIds") or []) + promptMode = (prompt.get("mode") or "").lower() + isPersistentPrompt = promptMode == TeamsbotDirectorPromptMode.PERSISTENT.value.lower() + + # Make file attachment EXPLICIT in the brief. The agent service already + # prepends a "## Attached Files & Folders" header via _enrichPromptWithFiles + # when fileIds are passed, but without an explicit instruction the agent + # sometimes goes straight to a generic answer. We force the workflow: + # studyDocs -> form briefing -> decide MEETING_REPLY vs SILENT. + filesBlock = "" + if attachedFileIds: + filesBlock = ( + "\nANGEHAENGTE DOKUMENTE (UDB-File-IDs): " + + ", ".join(attachedFileIds) + + "\nDu MUSST diese Dokumente VOR der finalen Antwort lesen / zusammenfassen " + "(z.B. summarizeContent, readFile, readContentObjects, describeImage). " + "Beziehe Fakten und Zitate aus den Dokumenten in deine Notiz / dein " + "Meeting-Reply ein, statt allgemein zu antworten.\n" + ) + + # Persistent prompts that ship documents are usually a "knowledge briefing" + # the operator wants the bot to STUDY now and USE LATER. The SILENT note + # in that case must be a useful, file-grounded summary that subsequent + # SPEECH_TEAMS triggers can pick up — not "noted". + persistentNoteHint = "" + if isPersistentPrompt and attachedFileIds: + persistentNoteHint = ( + "\nSPEZIAL fuer PERSISTENT + Dokumente: Wenn die Anweisung KEIN explizites " + "Meeting-Statement verlangt, antworte mit 'SILENT:' und liefere als interne " + "Notiz eine STRUKTURIERTE, faktendichte Briefing-Zusammenfassung der Dokumente " + "(Stichpunkte, Kennzahlen, Aussagen, die fuer Folgefragen im Meeting relevant " + "sein koennen). Diese Notiz wird spaeteren Meeting-Antworten als Wissensbasis " + "vorgelegt — schreibe sie also so, dass der Bot daraus zitieren kann.\n" + ) + + taskText = ( + f"Du bist der KI-Assistent in einem laufenden Teams-Meeting (Bot-Name: {self.config.botName}).\n" + f"Der Operator hat dir folgende PRIVATE Regieanweisung gegeben (die anderen Teilnehmer im " + f"Meeting sehen sie NICHT). Sie ist KEINE Frage an das Meeting, sondern eine interne " + f"Anweisung an dich:\n\n" + f"{directorText}\n" + f"{filesBlock}" + f"{persistentNoteHint}\n" + f"AKTUELLER MEETING-KONTEXT (juengste Aussagen):\n{recentTranscript}\n\n" + "ANTWORT-PROTOKOLL — Beginne deine FINALE Antwort mit GENAU EINEM dieser Marker:\n" + " • 'MEETING_REPLY:' gefolgt vom Text, der im Meeting gesprochen / in den Meeting-Chat " + "gepostet werden soll. Verwende diesen Marker NUR, wenn die Regieanweisung dich explizit " + "auffordert, jetzt etwas im Meeting zu sagen oder zu schreiben (Beispiele: 'stell dich vor', " + "'fasse zusammen', 'stelle Person X eine Frage', 'beantworte die letzte Frage'). Halte den " + "Text kurz, sprachlich passend zur Stimme und ohne Marker oder Meta-Kommentare.\n" + " • 'SILENT:' gefolgt von einer internen Notiz fuer das Operator-UI. " + "Verwende diesen Marker fuer interne Direktiven und Wissens-Briefings (Beispiele: " + "'achte ab jetzt auf X', 'merke dir Y', 'studiere Dokument Z'). " + "Dieser Text wird NICHT ins Meeting gegeben, dient aber spaeteren Meeting-Antworten " + "als Wissensbasis. Wenn Dokumente angehaengt sind, MUSS die Notiz konkrete, " + "zitierfaehige Fakten aus den Dokumenten enthalten.\n\n" + "Standard ist SILENT, wenn nicht eindeutig zur Meeting-Interaktion aufgefordert wurde. " + "Wiederhole NIEMALS die Regieanweisung selbst im MEETING_REPLY-Text." + ) + + try: + finalText = await self._runAgentForMeeting( + sessionId=sessionId, + taskText=taskText, + fileIds=attachedFileIds, + sourceLabel="directorPrompt", + triggerTranscriptId=None, + promptId=promptId, + directorPromptMode=True, + ) + + # One-shot: mark consumed; persistent: keep active but record success. + isPersistent = prompt.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value + updates: Dict[str, Any] = { + "status": TeamsbotDirectorPromptStatus.SUCCEEDED.value, + "responseText": finalText or "", + } + if not isPersistent: + updates["status"] = TeamsbotDirectorPromptStatus.CONSUMED.value + updates["consumedAt"] = getIsoTimestamp() + interface.updateDirectorPrompt(promptId, updates) + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": promptId, + "status": updates["status"], + "responseText": finalText, + }) + + except Exception as e: + logger.error( + f"Session {sessionId}: Director prompt {promptId} failed: {type(e).__name__}: {e}", + exc_info=True, + ) + interface.updateDirectorPrompt(promptId, { + "status": TeamsbotDirectorPromptStatus.FAILED.value, + "statusMessage": f"{type(e).__name__}: {str(e)[:300]}", + }) + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": promptId, + "status": TeamsbotDirectorPromptStatus.FAILED.value, + "error": f"{type(e).__name__}: {str(e)[:300]}", + }) + self._activePersistentPrompts = [ + p for p in self._activePersistentPrompts if p.get("id") != promptId + ] + + def _renderRecentTranscriptForAgent(self, maxLines: int = 20) -> str: + """Render the most recent context buffer entries for inclusion in the + agent task brief (similar to SPEECH_TEAMS context, but plain text).""" + if not self._contextBuffer: + return "(noch keine Aussagen erfasst)" + recent = self._contextBuffer[-maxLines:] + lines = [] + for seg in recent: + speaker = seg.get("speaker", "Unknown") + text = seg.get("text", "") + segSource = seg.get("source", "caption") + prefix = "Chat: " if segSource == "chat" else "" + if self._isBotSpeaker(speaker): + lines.append(f"[YOU ({self.config.botName})]: {text}") + else: + lines.append(f"[{prefix}{speaker}]: {text}") + return "\n".join(lines) + + async def _interimAgentBusyMessage(self) -> Optional[str]: + """Short spoken/chat line before a potentially long agent run (web, + tools). Phrasing is AI-localised to ``self.config.language`` and + cached per session — no hardcoded language branching. Returns + ``None`` if generation failed; caller must treat that as + 'silently skip the interim notice'.""" + return await self._pickEphemeralPhrase("agentBusy") + + async def _interimAgentRoundMessage( + self, roundNum: int, maxRounds: int + ) -> Optional[str]: + """Per-round progress notice for long agent runs (meeting voice / + chat, ephemeral). Phrasing is AI-localised once per session; + ``{round}`` and ``{maxRounds}`` placeholders are substituted at + render time. Returns ``None`` if generation failed.""" + return await self._pickEphemeralPhrase( + "agentRound", + substitutions={"round": roundNum, "maxRounds": maxRounds}, + ) + + async def _notifyMeetingEphemeral(self, sessionId: str, text: str) -> None: + """Deliver a short line to the meeting (TTS + chat per config) without + persisting botResponses/transcripts, so the main agent answer stays the + single recorded follow-up.""" + websocket = self._websocket + voiceInterface = self._voiceInterface + if not websocket: + logger.warning(f"Session {sessionId}: Interim notice skipped — no WebSocket") + return + + channelRaw = self.config.responseChannel + channelStr = ( + channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw) + ).lower().strip() + sendVoice = channelStr in ("voice", "both") + sendChat = channelStr in ("chat", "both") + + if sendVoice and voiceInterface: + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + outcome = await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=_voiceFriendlyMeetingText(text), + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) + if not outcome.get("success"): + logger.warning( + f"Session {sessionId}: Interim TTS failed ({outcome.get('error')}) — falling back to chat" + ) + if not sendChat: + sendChat = True + + if sendChat: + try: + await websocket.send_text(json.dumps({ + "type": "sendChatMessage", + "sessionId": sessionId, + "text": text, + })) + except Exception as chatErr: + logger.warning(f"Session {sessionId}: Interim chat failed: {chatErr}") + + await _emitSessionEvent(sessionId, "agentRun", { + "status": "interimNotice", + "message": text, + "timestamp": getIsoTimestamp(), + }) + + async def _runAgentForMeeting( + self, + sessionId: str, + taskText: str, + fileIds: List[str], + sourceLabel: str, + triggerTranscriptId: Optional[str] = None, + promptId: Optional[str] = None, + directorPromptMode: bool = False, + ) -> str: + """Run agentService.runAgent for a meeting context, deliver the FINAL + text via the bot's existing TTS + chat channels, and return that text. + + sourceLabel is used for logging and SSE differentiation + ('directorPrompt' or 'speechEscalation'). + + ``directorPromptMode`` activates the silent-by-default protocol for + operator director prompts: interim notices are suppressed, no per-round + meeting updates, and the FINAL text is parsed for an explicit + ``MEETING_REPLY:`` / ``SILENT:`` marker. Only ``MEETING_REPLY`` content + is dispatched to the meeting; everything else stays internal. + """ + from modules.serviceCenter.services.serviceAgent.datamodelAgent import ( + AgentConfig, AgentEventTypeEnum + ) + + ctx = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + feature_code="teamsbot", + ) + agentService = _getServiceCenterService("agent", ctx) + + # Workflow id stable per session so RAG/round-memory accumulate per meeting. + workflowId = f"teamsbot:{sessionId}" + + agentConfig = AgentConfig( + maxRounds=TEAMSBOT_AGENT_MAX_ROUNDS, + maxCostCHF=TEAMSBOT_AGENT_MAX_COST_CHF, + toolSet="core", + initialToolboxes=["core", "web"], + excludeActionTools=True, + ) + + await _emitSessionEvent(sessionId, "agentRun", { + "source": sourceLabel, + "promptId": promptId, + "status": "started", + "timestamp": getIsoTimestamp(), + }) + + # Director prompts run silently by default — no spontaneous "moment please" + # in the meeting just because the operator gave an internal directive. + if not directorPromptMode: + try: + interimText = await self._interimAgentBusyMessage() + if interimText: + await self._notifyMeetingEphemeral(sessionId, interimText) + except Exception as interimErr: + logger.warning(f"Session {sessionId}: Interim agent notice failed: {interimErr}") + + finalText: str = "" + rounds = 0 + try: + async for event in agentService.runAgent( + prompt=taskText, + fileIds=fileIds or None, + config=agentConfig, + toolSet="core", + workflowId=workflowId, + ): + if event.type == AgentEventTypeEnum.AGENT_PROGRESS: + rounds += 1 + pdata = event.data or {} + roundNum = int(pdata.get("round", rounds)) + maxR = int(pdata.get("maxRounds", TEAMSBOT_AGENT_MAX_ROUNDS)) + await _emitSessionEvent(sessionId, "agentRun", { + "source": sourceLabel, + "promptId": promptId, + "status": "progress", + "round": roundNum, + "maxRounds": maxR, + }) + # Runde 1: schon allgemeiner Start-Hinweis; ab Runde 2 ins Meeting melden. + # Director prompts bleiben still — keine Zwischen-Updates ins Meeting. + if roundNum >= 2 and not directorPromptMode: + try: + roundText = await self._interimAgentRoundMessage(roundNum, maxR) + if roundText: + await self._notifyMeetingEphemeral(sessionId, roundText) + except Exception as roundNoticeErr: + logger.warning( + f"Session {sessionId}: Per-round agent notice failed: {roundNoticeErr}" + ) + elif event.type == AgentEventTypeEnum.TOOL_CALL: + toolName = (event.data or {}).get("toolName") if event.data else None + await _emitSessionEvent(sessionId, "agentRun", { + "source": sourceLabel, + "promptId": promptId, + "status": "toolCall", + "toolName": toolName, + }) + elif event.type == AgentEventTypeEnum.FINAL: + finalText = (event.content or "").strip() + elif event.type == AgentEventTypeEnum.ERROR: + raise RuntimeError(event.content or "Agent error") + except Exception as runErr: + await _emitSessionEvent(sessionId, "agentRun", { + "source": sourceLabel, + "promptId": promptId, + "status": "error", + "error": str(runErr)[:500], + }) + raise + + await _emitSessionEvent(sessionId, "agentRun", { + "source": sourceLabel, + "promptId": promptId, + "status": "completed", + "rounds": rounds, + "hasText": bool(finalText), + }) + + if finalText: + if directorPromptMode: + decision = _parseDirectorPromptFinal(finalText) + kind = decision.get("kind", "silent") + meetingText = (decision.get("meetingText") or "").strip() + internalNote = (decision.get("internalNote") or "").strip() + + logger.info( + f"Session {sessionId}: Director prompt {promptId} -> kind={kind}, " + f"meetingChars={len(meetingText)}, noteChars={len(internalNote)}" + ) + + await _emitSessionEvent(sessionId, "directorPrompt", { + "id": promptId, + "status": "decision", + "decision": kind, + "meetingText": meetingText, + "internalNote": internalNote, + }) + + # Record this prompt as a session-scoped briefing BEFORE we hand + # delivery off. This is what later SPEECH_TEAMS triggers see, so + # if the user attached a doc with mode=PERSISTENT and the agent + # produced a file-grounded SILENT note, that note (and the + # original fileIds) stays available for "summarize the doc" + # follow-up questions in the meeting. + try: + promptRecord: Dict[str, Any] = {} + if promptId: + try: + from . import interfaceFeatureTeamsbot as _ifaceDb + _iface = _ifaceDb.getInterface( + self.currentUser, self.mandateId, self.instanceId + ) + promptRecord = _iface.getDirectorPrompt(promptId) or {} + except Exception as _lookupErr: + logger.debug( + f"Briefing pool: could not look up prompt {promptId}: {_lookupErr}" + ) + if promptRecord or promptId: + self._recordDirectorBriefing( + prompt=promptRecord or {"id": promptId}, + internalNote=internalNote, + meetingText=meetingText, + ) + except Exception as briefErr: + logger.warning( + f"Session {sessionId}: Director briefing pool update failed: {briefErr}" + ) + + # If this was a persistent prompt, the live in-memory copy in + # ``_activePersistentPrompts`` was loaded BEFORE the agent ran + # — refresh its ``responseText`` so subsequent + # ``_collectActiveDirectorBriefings`` calls show the latest + # analysis without waiting for the next session reload. + if promptId: + for p in self._activePersistentPrompts: + if p.get("id") == promptId: + p["responseText"] = internalNote or meetingText or finalText + break + + if kind == "meeting" and meetingText: + await self._deliverTextToMeeting( + sessionId=sessionId, + text=meetingText, + detectedIntent=f"agent:{sourceLabel}", + reasoning=f"Agent run ({sourceLabel})", + triggerTranscriptId=triggerTranscriptId, + ) + else: + # Silent: persist as internal-only botResponse so the operator + # UI keeps a record, but DO NOT push into the meeting (no TTS, + # no chat send). The director prompt SSE above already carries + # the note for the operator UI. + await self._persistInternalDirectorReply( + sessionId=sessionId, + internalNote=internalNote or finalText, + promptId=promptId, + triggerTranscriptId=triggerTranscriptId, + ) + return meetingText if kind == "meeting" else "" + + await self._deliverTextToMeeting( + sessionId=sessionId, + text=finalText, + detectedIntent=f"agent:{sourceLabel}", + reasoning=f"Agent run ({sourceLabel})", + triggerTranscriptId=triggerTranscriptId, + ) + + return finalText + + async def _deliverTextToMeeting( + self, + sessionId: str, + text: str, + detectedIntent: str, + reasoning: str, + triggerTranscriptId: Optional[str] = None, + ) -> None: + """Send agent text into the meeting via the same channels SPEECH_TEAMS + uses: TTS + chat per config, plus DB persistence and SSE events. + + Uses the websocket/voiceInterface stored on this instance. If the bot + is not connected anymore, the call still records the response in the DB + and emits SSE so the operator UI shows the agent answer. + """ + from . import interfaceFeatureTeamsbot as interfaceDb + interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) + + websocket = self._websocket + voiceInterface = self._voiceInterface + + channelRaw = self.config.responseChannel + channelStr = ( + channelRaw.value if hasattr(channelRaw, "value") else str(channelRaw) + ).lower().strip() + sendVoice = channelStr in ("voice", "both") + sendChat = channelStr in ("chat", "both") + + if sendVoice and sendChat: + responseType = TeamsbotResponseType.BOTH + elif sendVoice: + responseType = TeamsbotResponseType.AUDIO + else: + responseType = TeamsbotResponseType.CHAT + + # Voice (TTS input is voice-sanitized; chat + DB keep full structured text). + # Long agent answers must be chunked: Google TTS rejects single sentences + # > ~5000 bytes, and the Chirp3 voices fail on long comma-heavy lines too. + ttsOutcome: Optional[Dict[str, Any]] = None + if sendVoice and voiceInterface and websocket: + spokenText = await self._summarizeForVoice(sessionId, text) + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + ttsOutcome = await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=spokenText, + languageCode=self.config.language, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) + await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { + "status": "dispatched" if ttsOutcome.get("success") else "failed", + "hasWebSocket": True, + "chunks": ttsOutcome.get("chunks"), + "played": ttsOutcome.get("played"), + "error": ttsOutcome.get("error"), + "timestamp": getIsoTimestamp(), + }) + if not ttsOutcome.get("success"): + logger.warning( + f"Session {sessionId}: Agent TTS delivery failed " + f"({ttsOutcome.get('error')}) — falling back to meeting chat" + ) + if not sendChat: + sendChat = True + + # Chat + if sendChat and websocket: + try: + await websocket.send_text(json.dumps({ + "type": "sendChatMessage", + "sessionId": sessionId, + "text": text, + })) + logger.info(f"Session {sessionId}: Agent chat dispatched ({len(text)} chars)") + except Exception as chatErr: + logger.warning(f"Session {sessionId}: Agent chat delivery failed: {chatErr}") + + # Persist as botResponse + transcript so it shows up in history/UI. + intentEnum, intentMeta = _coercePersistedDetectedIntent(detectedIntent) + reasoningForDb = ( + f"{reasoning} [{intentMeta}]" if intentMeta else reasoning + ) + + botResponseData = TeamsbotBotResponse( + sessionId=sessionId, + responseText=text, + responseType=responseType, + detectedIntent=intentEnum, + reasoning=reasoningForDb, + triggeredByTranscriptId=triggerTranscriptId, + modelName="agent", + processingTime=0.0, + priceCHF=0.0, + timestamp=getIsoTimestamp(), + ).model_dump() + createdResponse = interface.createBotResponse(botResponseData) + + await _emitSessionEvent(sessionId, "botResponse", { + "id": createdResponse.get("id"), + "responseText": text, + "responseType": responseType.value, + "detectedIntent": intentEnum.value, + "reasoning": reasoningForDb, + "modelName": "agent", + "processingTime": 0.0, + "priceCHF": 0.0, + "timestamp": botResponseData.get("timestamp"), + }) + + botTranscriptData = TeamsbotTranscript( + sessionId=sessionId, + speaker=self.config.botName, + text=text, + timestamp=getIsoTimestamp(), + confidence=1.0, + language=self.config.language, + isFinal=True, + source="botResponse", + ).model_dump() + botTranscript = interface.createTranscript(botTranscriptData) + + self._contextBuffer.append({ + "speaker": self.config.botName, + "text": text, + "timestamp": getUtcTimestamp(), + "source": "botResponse", + }) + self._lastTranscriptSpeaker = self.config.botName + self._lastTranscriptText = text + self._lastTranscriptId = botTranscript.get("id") + self._lastBotResponseText = text.strip().lower() + self._lastBotResponseTs = time.time() + self._followUpWindowEnd = time.time() + 15.0 + + await _emitSessionEvent(sessionId, "transcript", { + "id": botTranscript.get("id"), + "speaker": self.config.botName, + "text": text, + "confidence": 1.0, + "timestamp": getIsoTimestamp(), + "isContinuation": False, + "source": "botResponse", + "speakerResolvedFromHint": False, + }) + + session = interface.getSession(sessionId) + if session: + count = session.get("botResponseCount", 0) + 1 + interface.updateSession(sessionId, {"botResponseCount": count}) + + async def _persistInternalDirectorReply( + self, + sessionId: str, + internalNote: str, + promptId: Optional[str], + triggerTranscriptId: Optional[str] = None, + ) -> None: + """Record a director-prompt agent reply as INTERNAL (operator-UI only). + + Unlike ``_deliverTextToMeeting`` this never dispatches TTS or chat into + the meeting, never appends to the meeting context buffer, and does not + create a meeting transcript line. It only persists a botResponse and + emits an SSE event so the operator UI shows what the agent decided. + """ + from . import interfaceFeatureTeamsbot as interfaceDb + + note = (internalNote or "").strip() + if not note: + return + + interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) + + intentEnum, _intentMeta = _coercePersistedDetectedIntent("agent:directorPrompt") + reasoningForDb = ( + f"Director prompt {promptId or ''} — silent / internal only " + f"(not sent to meeting)" + ).strip() + + botResponseData = TeamsbotBotResponse( + sessionId=sessionId, + responseText=note, + responseType=TeamsbotResponseType.CHAT, + detectedIntent=intentEnum, + reasoning=reasoningForDb, + triggeredByTranscriptId=triggerTranscriptId, + modelName="agent", + processingTime=0.0, + priceCHF=0.0, + timestamp=getIsoTimestamp(), + ).model_dump() + createdResponse = interface.createBotResponse(botResponseData) + + await _emitSessionEvent(sessionId, "botResponse", { + "id": createdResponse.get("id"), + "responseText": note, + "responseType": TeamsbotResponseType.CHAT.value, + "detectedIntent": intentEnum.value, + "reasoning": reasoningForDb, + "modelName": "agent", + "processingTime": 0.0, + "priceCHF": 0.0, + "timestamp": botResponseData.get("timestamp"), + "internalOnly": True, + "promptId": promptId, + }) + + logger.info( + f"Session {sessionId}: Director prompt {promptId} silent reply " + f"persisted internally ({len(note)} chars)" + ) + + # ========================================================================= + # Greeting (AI-localised, no hardcoded language strings) + # ========================================================================= + + async def _generateGreetingText(self, languageCode: str) -> str: + """Generate the bot's join greeting via AI in ``languageCode`` and the + configured persona. Returns empty string on failure — the caller must + treat that as 'skip the greeting' (NEVER fall back to a hardcoded + localised string).""" + targetLang = (languageCode or self.config.language or "").strip() or "en-US" + botName = (self.config.botName or "the assistant").strip() + firstName = botName.split(" ")[0] if botName else botName + persona = (self.config.aiSystemPrompt or "").strip() + + # English instructions to the LLM; the OUTPUT must be in ``targetLang``. + prompt = ( + f"You are localizing the join greeting for a meeting assistant.\n\n" + f"Assistant display name (use exactly this, no translation): {firstName}\n\n" + f"Persona / style guide for the assistant:\n" + f"{persona or '(no persona configured — use a neutral, polite, professional tone)'}\n\n" + f"Target spoken language (BCP-47 code): {targetLang}\n\n" + f"Generate ONE short greeting (max ~14 words) for the assistant " + f"to say AND post in chat the moment it joins a meeting. The " + f"greeting MUST:\n" + f" - be in the target language\n" + f" - introduce the assistant by name ({firstName})\n" + f" - signal that it is now present and ready\n" + f" - sound natural when spoken aloud (this text is also TTS'd)\n\n" + f"Output ONLY the greeting text, no quotes, no markdown, no " + f"commentary, no surrounding punctuation beyond what naturally " + f"belongs to the sentence." + ) + + try: + aiService = _createAiService( + self.currentUser, self.mandateId, self.instanceId + ) + await aiService.ensureAiObjectsInitialized() + request = AiCallRequest( + prompt=prompt, + context="", + options=AiCallOptions( + operationType=OperationTypeEnum.DATA_ANALYSE, + priority=PriorityEnum.SPEED, + ), + ) + response = await aiService.callAi(request) + except Exception as aiErr: + logger.warning( + f"Greeting generation crashed (lang={targetLang}): {aiErr}" + ) + return "" + + if not response or response.errorCount != 0 or not response.content: + logger.warning( + f"Greeting generation returned empty/error (lang={targetLang})" + ) + return "" + + text = response.content.strip() + # Strip any wrapping quotes/code fences the model might have added. + text = re.sub(r"^```.*?\n", "", text, flags=re.DOTALL) + text = re.sub(r"\n```\s*$", "", text) + text = text.strip().strip("\"'`").strip() + if not text: + return "" + logger.info( + f"Greeting generated (lang={targetLang}, chars={len(text)}): {text[:80]}" + ) + return text + + async def _dispatchGreetingToMeeting( + self, + sessionId: str, + greetingText: str, + greetingLang: str, + sendToChat: bool, + interface: Any, + voiceInterface: Any, + websocket: WebSocket, + ) -> None: + """Centralised dispatcher for the bot's join greeting: speaks the + text via TTS into the meeting and (optionally) tells the bot to post + it in the meeting chat. Persists the greeting as a bot transcript / + botResponse so it appears in the operator UI history. + + ``sendToChat`` is ``False`` for the legacy ``voiceGreeting`` path + (the bot already chatted itself) and ``True`` for the new + ``requestGreeting`` path where the Gateway owns chat dispatch too. + """ + try: + await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { + "status": "requested", + "hasWebSocket": True, + "message": "Greeting TTS requested", + "timestamp": getIsoTimestamp(), + }) + cancelHook = self._makeAnswerCancelHook() + async with self._meetingTtsLock: + ttsOutcome = await _speakTextChunked( + websocket=websocket, + voiceInterface=voiceInterface, + sessionId=sessionId, + voiceText=_voiceFriendlyMeetingText(greetingText), + languageCode=greetingLang, + voiceName=self.config.voiceId, + isCancelled=cancelHook, + ) + if ttsOutcome.get("success"): + logger.info( + f"Greeting TTS sent for session {sessionId} " + f"(chunks={ttsOutcome.get('chunks')})" + ) + await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { + "status": "dispatched", + "hasWebSocket": True, + "chunks": ttsOutcome.get("chunks"), + "played": ttsOutcome.get("played"), + "timestamp": getIsoTimestamp(), + }) + else: + logger.warning( + f"Greeting TTS failed for session {sessionId}: {ttsOutcome.get('error')}" + ) + await _emitSessionEvent(sessionId, "ttsDeliveryStatus", { + "status": "failed", + "hasWebSocket": True, + "message": ttsOutcome.get("error"), + "timestamp": getIsoTimestamp(), + }) + + if sendToChat: + try: + await websocket.send_text(json.dumps({ + "type": "sendChatMessage", + "sessionId": sessionId, + "text": greetingText, + })) + logger.info(f"Greeting chat dispatch queued for session {sessionId}") + except Exception as chatErr: + logger.warning( + f"Greeting chat dispatch failed for session {sessionId}: {chatErr}" + ) + + greetingTranscriptData = TeamsbotTranscript( + sessionId=sessionId, + speaker=self.config.botName, + text=greetingText, + timestamp=getIsoTimestamp(), + confidence=1.0, + language=greetingLang, + isFinal=True, + source="botResponse", + ).model_dump() + greetingTranscript = interface.createTranscript(greetingTranscriptData) + + self._contextBuffer.append({ + "speaker": self.config.botName, + "text": greetingText, + "timestamp": getUtcTimestamp(), + "source": "botResponse", + }) + self._lastTranscriptSpeaker = self.config.botName + self._lastTranscriptText = greetingText + self._lastTranscriptId = greetingTranscript.get("id") + + await _emitSessionEvent(sessionId, "botResponse", { + "id": greetingTranscript.get("id"), + "responseText": greetingText, + "responseType": TeamsbotResponseType.AUDIO.value, + "detectedIntent": "greeting", + "reasoning": "Automatic join greeting", + "timestamp": getIsoTimestamp(), + }) + await _emitSessionEvent(sessionId, "transcript", { + "id": greetingTranscript.get("id"), + "speaker": self.config.botName, + "text": greetingText, + "confidence": 1.0, + "timestamp": getIsoTimestamp(), + "isContinuation": False, + "source": "botResponse", + "speakerResolvedFromHint": False, + }) + except Exception as dispatchErr: + logger.warning( + f"Greeting dispatch failed for session {sessionId}: {dispatchErr}" + ) + # ========================================================================= # Context Summarization (for long sessions) # ========================================================================= diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py index a606c58a..ef8789ea 100644 --- a/modules/features/trustee/accounting/accountingDataSync.py +++ b/modules/features/trustee/accounting/accountingDataSync.py @@ -144,6 +144,8 @@ class AccountingDataSync: "journalLines": 0, "contacts": 0, "accountBalances": 0, + "oldestBookingDate": None, + "newestBookingDate": None, "errors": [], "startedAt": time.time(), } @@ -211,12 +213,14 @@ class AccountingDataSync: ) _dumpSyncData("journalEntries", rawEntries) _progress(60, f"Speichere {len(rawEntries)} Buchungssaetze...") - entriesCount, linesCount = await asyncio.to_thread( + entriesCount, linesCount, oldestDate, newestDate = await asyncio.to_thread( self._persistJournal, rawEntries, scope, featureInstanceId, TrusteeDataJournalEntry, TrusteeDataJournalLine, ) summary["journalEntries"] = entriesCount summary["journalLines"] = linesCount + summary["oldestBookingDate"] = oldestDate + summary["newestBookingDate"] = newestDate _progress(65, f"{entriesCount} Saetze + {linesCount} Buchungszeilen gespeichert.") except Exception as e: logger.error(f"Import journal entries failed: {e}", exc_info=True) @@ -277,6 +281,11 @@ class AccountingDataSync: "journalLines": int(summary.get("journalLines", 0)), "contacts": int(summary.get("contacts", 0)), "accountBalances": int(summary.get("accountBalances", 0)), + # Actual oldest/newest booking date observed in the + # imported journal entries. Lets the user verify that the + # full requested window was returned by the source system. + "oldestBookingDate": summary.get("oldestBookingDate"), + "newestBookingDate": summary.get("newestBookingDate"), }, } try: @@ -321,6 +330,9 @@ class AccountingDataSync: We pre-build the line rows in memory keyed by the freshly minted entryId so a single ``execute_values`` call can persist all of them. + + Returns ``(entriesCount, linesCount, oldestBookingDate, newestBookingDate)`` + where the date strings are ISO ``YYYY-MM-DD`` (or ``None`` if no entries). """ import uuid as _uuid t0 = time.time() @@ -329,12 +341,22 @@ class AccountingDataSync: entryRows: List[Dict[str, Any]] = [] lineRows: List[Dict[str, Any]] = [] + oldestDate: Optional[str] = None + newestDate: Optional[str] = None for raw in rawEntries: entryId = str(_uuid.uuid4()) + bookingDate = raw.get("bookingDate") + if bookingDate: + normalized = str(bookingDate).split("T")[0][:10] + if normalized: + if oldestDate is None or normalized < oldestDate: + oldestDate = normalized + if newestDate is None or normalized > newestDate: + newestDate = normalized entryRows.append({ "id": entryId, "externalId": raw.get("externalId"), - "bookingDate": raw.get("bookingDate"), + "bookingDate": bookingDate, "reference": raw.get("reference"), "description": raw.get("description", ""), "currency": raw.get("currency", "CHF"), @@ -363,9 +385,10 @@ class AccountingDataSync: linesCount = self._bulkCreate(modelLine, lineRows) logger.info( f"Persisted {entriesCount} entries + {linesCount} lines for " - f"{featureInstanceId} in {time.time() - t0:.1f}s" + f"{featureInstanceId} in {time.time() - t0:.1f}s " + f"(window: {oldestDate or '?'} .. {newestDate or '?'})" ) - return entriesCount, linesCount + return entriesCount, linesCount, oldestDate, newestDate def _persistContacts(self, customers: list, vendors: list, scope: Dict[str, Any], featureInstanceId: str, modelContact: Type) -> int: diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py index 79a61d77..9e372099 100644 --- a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py +++ b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py @@ -437,7 +437,10 @@ class AccountingConnectorRma(BaseAccountingConnector): "creditAmount": credit, "description": desc, }) - entry["totalAmount"] += max(debit, credit) + # Booking total = sum of debits (== sum of credits for a balanced + # booking). Summing max(debit, credit) per line would double-count + # a balanced 2-line booking (200 instead of 100). + entry["totalAmount"] += debit return list(entriesByRef.values()) except Exception as e: @@ -494,7 +497,9 @@ class AccountingConnectorRma(BaseAccountingConnector): "creditAmount": credit, "description": t.get("memo", ""), }) - totalAmt += max(debit, credit) + # Sum debits only -- equals sum of credits for a balanced + # booking. max(debit, credit) per line would double-count. + totalAmt += debit entries.append({ "externalId": str(batch.get("id", ref)), diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py index 265227a0..a87f6f55 100644 --- a/modules/features/trustee/datamodelFeatureTrustee.py +++ b/modules/features/trustee/datamodelFeatureTrustee.py @@ -3,7 +3,7 @@ """Trustee models: TrusteeOrganisation, TrusteeRole, TrusteeAccess, TrusteeContract, TrusteeDocument, TrusteePosition.""" from enum import Enum -from typing import Optional, Dict +from typing import Optional, Dict, Any from pydantic import BaseModel, Field from modules.datamodels.datamodelBase import PowerOnModel @@ -832,7 +832,7 @@ class TrusteeAccountingConfig(PowerOnModel): lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"}) lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"}) lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"}) - lastSyncCounts: Optional[Dict[str, int]] = Field(default=None, description="Per-entity counts of the last import (accounts, journalEntries, journalLines, contacts, accountBalances)", json_schema_extra={"label": "Letzte Import-Zaehler"}) + lastSyncCounts: Optional[Dict[str, Any]] = Field(default=None, description="Last import summary: per-entity counts (accounts, journalEntries, journalLines, contacts, accountBalances) plus oldestBookingDate / newestBookingDate (ISO YYYY-MM-DD) for completeness verification", json_schema_extra={"label": "Letzte Import-Zaehler"}) cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"}) chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"}) mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py index d040c37d..fbdd0966 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -1864,13 +1864,87 @@ def clear_ai_data_cache( instanceId: str = Path(..., description="Feature Instance ID"), context: RequestContext = Depends(getRequestContext), ) -> Dict[str, Any]: - """Clear the AI feature-data query cache for this instance so the next AI query reads fresh DB data.""" + """Clear ONLY the AI feature-data query result cache (in-memory, ~5 min TTL). + + Important: this does NOT touch the synchronised ``TrusteeData*`` tables. + The synced rows (chart of accounts, journal entries/lines, contacts, balances) + stay exactly as imported. To wipe those rows, use POST .../wipe-imported-data. + """ _validateInstanceAccess(instanceId, context) from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache removed = clearFeatureQueryCache(instanceId) return {"cleared": removed, "featureInstanceId": instanceId} +@router.post("/{instanceId}/accounting/wipe-imported-data") +@limiter.limit("3/minute") +def wipe_imported_accounting_data( + request: Request, + instanceId: str = Path(..., description="Feature Instance ID"), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """Delete every ``TrusteeData*`` row imported for this feature instance. + + Use when the source system was changed, test data needs to be cleared, or + the user suspects stale rows from earlier connector versions. Also resets + the ``lastSync*`` markers on the active config so the UI no longer reports + a stale "letzter Import" status. The connector configuration / credentials + remain untouched -- only synchronised payload data is removed. + """ + mandateId = _validateInstanceAccess(instanceId, context) + interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) + from .datamodelFeatureTrustee import ( + TrusteeDataAccount, TrusteeDataJournalEntry, TrusteeDataJournalLine, + TrusteeDataContact, TrusteeDataAccountBalance, TrusteeAccountingConfig, + ) + from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache + + removed: Dict[str, int] = {} + for tableName, model in [ + ("accounts", TrusteeDataAccount), + ("journalEntries", TrusteeDataJournalEntry), + ("journalLines", TrusteeDataJournalLine), + ("contacts", TrusteeDataContact), + ("accountBalances", TrusteeDataAccountBalance), + ]: + try: + removed[tableName] = int(interface.db.recordDeleteWhere(model, {"featureInstanceId": instanceId}) or 0) + except Exception as ex: + logger.warning("wipeImportedData: failed for %s: %s", tableName, ex) + removed[tableName] = 0 + + cfgRecords = interface.db.getRecordset( + TrusteeAccountingConfig, + recordFilter={"featureInstanceId": instanceId, "isActive": True}, + ) + if cfgRecords: + cfgId = cfgRecords[0].get("id") + if cfgId: + try: + interface.db.recordModify(TrusteeAccountingConfig, cfgId, { + "lastSyncAt": None, + "lastSyncStatus": None, + "lastSyncErrorMessage": None, + "lastSyncDateFrom": None, + "lastSyncDateTo": None, + "lastSyncCounts": None, + }) + except Exception as ex: + logger.warning("wipeImportedData: failed to reset lastSync* on cfg %s: %s", cfgId, ex) + + cacheCleared = clearFeatureQueryCache(instanceId) + logger.info( + "wipeImportedData instance=%s removed=%s cacheCleared=%s", + instanceId, removed, cacheCleared, + ) + return { + "removed": removed, + "totalRemoved": sum(removed.values()), + "cacheCleared": cacheCleared, + "featureInstanceId": instanceId, + } + + # ===== Data Export ===== @router.get("/{instanceId}/accounting/export-data") diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index e9d78e55..3e8bf4ea 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -210,7 +210,18 @@ def _buildSystemTemplates(): "nodes": [ {"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Täglicher Check", "parameters": {}}, {"id": "n2", "type": "email.checkEmail", "x": 300, "y": 200, "title": "Mailbox prüfen", "parameters": {}}, - {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro E-Mail", "parameters": {}}, + { + "id": "n3", + "type": "flow.loop", + "x": 550, + "y": 200, + "title": "Pro E-Mail", + "parameters": { + "items": {"type": "ref", "nodeId": "n2", "path": ["emails"]}, + "level": "auto", + "concurrency": 1, + }, + }, {"id": "n4", "type": "ai.prompt", "x": 800, "y": 200, "title": "Analyse: Antwort nötig?", "parameters": {}}, {"id": "n5", "type": "flow.ifElse", "x": 1050, "y": 200, "title": "Antwort nötig?", "parameters": {}}, {"id": "n6", "type": "ai.prompt", "x": 1300, "y": 100, "title": "Kontext abrufen & Antwort formulieren", "parameters": {}}, @@ -239,7 +250,18 @@ def _buildSystemTemplates(): "nodes": [ {"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Geplanter Import", "parameters": {}}, {"id": "n2", "type": "sharepoint.listFiles", "x": 300, "y": 200, "title": "SharePoint Ordner lesen", "parameters": {}}, - {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro Dokument", "parameters": {}}, + { + "id": "n3", + "type": "flow.loop", + "x": 550, + "y": 200, + "title": "Pro Dokument", + "parameters": { + "items": {"type": "ref", "nodeId": "n2", "path": ["files"]}, + "level": "auto", + "concurrency": 1, + }, + }, {"id": "n4", "type": "sharepoint.readFile", "x": 800, "y": 200, "title": "PDF-Inhalt lesen", "parameters": {}}, {"id": "n5", "type": "ai.prompt", "x": 1050, "y": 200, "title": "Typ klassifizieren (Rechnung, Beleg, Bankauszug, Vertrag, etc.)", "parameters": {}}, {"id": "n6", "type": "trustee.extractFromFiles", "x": 1300, "y": 200, "title": "Dokument extrahieren", "parameters": {}}, diff --git a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py index 0026fa23..e0b5cb43 100644 --- a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py +++ b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py @@ -3,7 +3,7 @@ """ActionToolAdapter: wraps existing workflow actions (dynamicMode=True) as agent tools.""" import logging -from typing import Dict, Any, List, Optional +from typing import Dict, Any, List from modules.serviceCenter.services.serviceAgent.datamodelAgent import ( ToolDefinition, ToolResult @@ -70,22 +70,28 @@ def _buildToolDefinition(compoundName: str, actionDef, actionInfo: Dict[str, Any def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]: - """Convert workflow action parameter schema to JSON Schema for tool definitions.""" - properties = {} - required = [] + """Convert workflow action parameter schema to JSON Schema for tool definitions. + + Schicht-3 Adapter (typed): looks up each parameter's `type` against the + PORT_TYPE_CATALOG and produces a strict JSON Schema fragment. + Falls back to a generic string schema only when the type is fully unknown + (which should never happen after Phase 2's signature validator). + """ + properties: Dict[str, Any] = {} + required: List[str] = [] for paramName, paramInfo in actionParams.items(): - paramType = paramInfo.get("type", "str") if isinstance(paramInfo, dict) else "str" - paramDesc = paramInfo.get("description", "") if isinstance(paramInfo, dict) else "" - paramRequired = paramInfo.get("required", False) if isinstance(paramInfo, dict) else False + if not isinstance(paramInfo, dict): + properties[paramName] = {"type": "string", "description": ""} + continue - jsonType = _pythonTypeToJsonType(paramType) - prop: Dict[str, Any] = { - "type": jsonType, - "description": paramDesc, - } - if jsonType == "array": - prop["items"] = _pythonTypeToArrayItems(paramType) or {"type": "string"} + paramType = paramInfo.get("type", "str") + paramDesc = paramInfo.get("description", "") or "" + paramRequired = bool(paramInfo.get("required", False)) + + prop = _catalogTypeToJsonSchema(paramType) + if paramDesc: + prop["description"] = paramDesc properties[paramName] = prop if paramRequired: @@ -94,41 +100,90 @@ def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]: return { "type": "object", "properties": properties, - "required": required + "required": required, } -_TYPE_MAPPING = { +# Primitive Python type strings → JSON Schema scalar types. +_PRIMITIVE_JSON_TYPE: Dict[str, str] = { "str": "string", "int": "integer", "float": "number", "bool": "boolean", - "list": "array", - "dict": "object", - "List[str]": "array", - "List[int]": "array", - "List[dict]": "array", - "List[float]": "array", - "Dict[str, Any]": "object", -} - -_ARRAY_ITEMS_MAPPING = { - "list": {"type": "string"}, - "List[str]": {"type": "string"}, - "List[int]": {"type": "integer"}, - "List[float]": {"type": "number"}, - "List[dict]": {"type": "object"}, } -def _pythonTypeToJsonType(pythonType: str) -> str: - """Map Python type strings to JSON Schema types.""" - return _TYPE_MAPPING.get(pythonType, "string") +def _catalogTypeToJsonSchema(typeStr: str, _depth: int = 0) -> Dict[str, Any]: + """Recursively convert a PORT_TYPE_CATALOG type reference into a JSON Schema fragment. + Supports: + - Primitives (str/int/bool/float/Any) + - Catalog object schemas (recursively expanded with properties/required) + - List[X] (array with typed items) + - Dict[K, V] (object with typed additionalProperties) -def _pythonTypeToArrayItems(pythonType: str) -> Optional[Dict[str, Any]]: - """Return the JSON Schema `items` descriptor for array types, or None.""" - return _ARRAY_ITEMS_MAPPING.get(pythonType) + `_depth` guards against pathological recursion in case of a cyclic catalog. + """ + from modules.features.graphicalEditor.portTypes import ( + PORT_TYPE_CATALOG, + PRIMITIVE_TYPES, + ) + + if _depth > 6: + return {"type": "object", "description": "(max-depth)"} + + if not typeStr or not isinstance(typeStr, str): + return {"type": "string"} + + typeStr = typeStr.strip() + + if typeStr in _PRIMITIVE_JSON_TYPE: + return {"type": _PRIMITIVE_JSON_TYPE[typeStr]} + if typeStr == "Any": + return {} + + if typeStr.startswith("List[") and typeStr.endswith("]"): + inner = typeStr[5:-1].strip() + return {"type": "array", "items": _catalogTypeToJsonSchema(inner, _depth + 1)} + + if typeStr.startswith("Dict[") and typeStr.endswith("]"): + inner = typeStr[5:-1].strip() + valueType = "Any" + parts = [p.strip() for p in inner.split(",", 1)] + if len(parts) == 2: + valueType = parts[1] + return { + "type": "object", + "additionalProperties": _catalogTypeToJsonSchema(valueType, _depth + 1), + } + + schema = PORT_TYPE_CATALOG.get(typeStr) + if schema is not None: + props: Dict[str, Any] = {} + required: List[str] = [] + for f in schema.fields: + fragment = _catalogTypeToJsonSchema(f.type, _depth + 1) + if f.description: + fragment["description"] = f.description + if f.enumValues: + fragment["enum"] = list(f.enumValues) + props[f.name] = fragment + if f.required: + required.append(f.name) + out: Dict[str, Any] = { + "type": "object", + "properties": props, + "description": f"PORT_TYPE_CATALOG schema '{schema.name}'", + } + if required: + out["required"] = required + return out + + # Lowercase 'list' / 'dict' aliases (legacy, should be eradicated by Phase 2 validator) + if typeStr in PRIMITIVE_TYPES and typeStr in {"List", "Dict"}: + return {"type": "array" if typeStr == "List" else "object"} + + return {"type": "string", "description": f"unknown type '{typeStr}' (defaulted to string)"} def _createDispatchHandler(actionExecutor, methodName: str, actionName: str): diff --git a/modules/serviceCenter/services/serviceAgent/workflowTools.py b/modules/serviceCenter/services/serviceAgent/workflowTools.py index 34ca5d46..7f01ee79 100644 --- a/modules/serviceCenter/services/serviceAgent/workflowTools.py +++ b/modules/serviceCenter/services/serviceAgent/workflowTools.py @@ -291,6 +291,85 @@ async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult: return _err(name, str(e)) +async def _list_upstream_paths(params: Dict[str, Any], context: Any) -> ToolResult: + """List pickable upstream DataRef paths for a node (saved workflow graph).""" + name = "listUpstreamPaths" + try: + workflow_id, instance_id = _resolveIds(params, context) + node_id = params.get("nodeId") + if not workflow_id or not instance_id or not node_id: + return _err(name, "workflowId, instanceId, and nodeId required") + + iface = _getInterface(context, instance_id) + wf = iface.getWorkflow(workflow_id) + if not wf: + return _err(name, f"Workflow {workflow_id} not found") + + graph = wf.get("graph", {}) or {} + from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths + + paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id)) + return _ok(name, {"paths": paths}) + except Exception as e: + logger.exception("listUpstreamPaths failed: %s", e) + return _err(name, str(e)) + + +async def _bind_node_parameter(params: Dict[str, Any], context: Any) -> ToolResult: + """Bind a node parameter to an upstream field via an explicit DataRef.""" + name = "bindNodeParameter" + try: + workflow_id, instance_id = _resolveIds(params, context) + node_id = params.get("nodeId") + param_name = params.get("parameterName") + producer_node_id = params.get("producerNodeId") + path = params.get("path") + if not workflow_id or not instance_id or not node_id or not param_name: + return _err(name, "workflowId, instanceId, nodeId, and parameterName required") + if not producer_node_id: + return _err(name, "producerNodeId required") + + iface = _getInterface(context, instance_id) + wf = iface.getWorkflow(workflow_id) + if not wf: + return _err(name, f"Workflow {workflow_id} not found") + + graph = dict(wf.get("graph", {}) or {}) + nodes = list(graph.get("nodes", []) or []) + found = False + ref: Dict[str, Any] = { + "type": "ref", + "nodeId": str(producer_node_id), + "path": list(path) if isinstance(path, (list, tuple)) else [], + } + exp_type = params.get("expectedType") + if exp_type: + ref["expectedType"] = str(exp_type) + + for n in nodes: + if n.get("id") == node_id: + node_params = dict(n.get("parameters", {}) or {}) + node_params[param_name] = ref + n["parameters"] = node_params + found = True + break + + if not found: + return _err(name, f"Node {node_id} not found in graph") + + graph["nodes"] = nodes + iface.updateWorkflow(workflow_id, {"graph": graph}) + return _ok(name, { + "nodeId": node_id, + "parameter": param_name, + "dataRef": ref, + "message": f"Parameter '{param_name}' bound to upstream {producer_node_id}", + }) + except Exception as e: + logger.exception("bindNodeParameter failed: %s", e) + return _err(name, str(e)) + + def _coerceLabel(rawLabel: Any, fallback: str) -> str: """Normalize a node label which may be a string, dict {locale: str}, or other.""" if isinstance(rawLabel, str): @@ -950,6 +1029,45 @@ def getWorkflowToolDefinitions() -> List[Dict[str, Any]]: }, "toolSet": TOOLBOX_ID, }, + { + "name": "listUpstreamPaths", + "handler": _list_upstream_paths, + "description": ( + "List pickable upstream paths for binding DataRefs on a node. " + "Call after readWorkflowGraph; use with bindNodeParameter instead of relying on implicit wiring." + ), + "parameters": { + "type": "object", + "properties": { + **_idFields, + "nodeId": {"type": "string", "description": "Target node id (the node whose parameters you will bind)"}, + }, + "required": ["nodeId"], + }, + "readOnly": True, + "toolSet": TOOLBOX_ID, + }, + { + "name": "bindNodeParameter", + "handler": _bind_node_parameter, + "description": ( + "Bind a parameter to an upstream output using an explicit DataRef " + "(producerNodeId + path). Prefer listUpstreamPaths to discover valid paths." + ), + "parameters": { + "type": "object", + "properties": { + **_idFields, + "nodeId": {"type": "string"}, + "parameterName": {"type": "string"}, + "producerNodeId": {"type": "string", "description": "Upstream node id (port 0 producer)"}, + "path": {"type": "array", "items": {}, "description": "JSON path segments, e.g. [\"documents\"] or [\"id\"]"}, + "expectedType": {"type": "string", "description": "Optional type hint stored on the ref"}, + }, + "required": ["nodeId", "parameterName", "producerNodeId"], + }, + "toolSet": TOOLBOX_ID, + }, { "name": "listAvailableNodeTypes", "handler": _listAvailableNodeTypes, diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index cd7de3e9..6428bed3 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -520,7 +520,28 @@ STOP-ERKENNUNG: Wenn jemand dich bittet aufzuhoeren, still zu sein, zu stoppen, oder nicht mehr zu reden (in JEDER Sprache, z.B. "{botFirstName} stop", "{botFirstName} sei still", "{botFirstName} halt", "{botFirstName} be quiet", "{botFirstName} shut up", "{botFirstName} arrete", etc.), dann setze detectedIntent auf "stop" und -shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt.""" +shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt. + +AGENT-ESKALATION (needsAgent): +Du bist ein SCHNELLER Reflex-Pfad. Fuer komplexe Aufgaben gibt es einen vollwertigen Agent +mit Web-Recherche, E-Mail-Versand, Dokumenten-Erzeugung und Datenquellen-Zugriff +(SharePoint, Outlook, GoogleDrive etc. via User-Connections). + +Setze "needsAgent": true und "agentReason": "" +WENN die Aufgabe eines oder mehrere dieser Merkmale hat: +- Recherche im Internet noetig (z.B. "recherchier was im Internet ueber XY", "schau mal nach", "google das") +- E-Mail an Teilnehmer/Kontakte versenden +- Dokument (PDF, Word, Excel) generieren oder im SharePoint/Drive ablegen +- Mehrere Schritte oder Tool-Aufrufe noetig (Zusammenfassung + Versand, Recherche + Empfehlung etc.) +- Daten aus externen Quellen abrufen (Outlook-Kontakte, SharePoint-Dateien, Kalender etc.) + +Wenn needsAgent=true: +- Setze shouldRespond=false (der Agent uebernimmt; du sprichst NICHT eigenstaendig). +- responseText kann eine kurze Bestaetigung sein, wird aber nicht ausgesprochen. +- agentReason ist die Aufgabenbeschreibung fuer den Agent (klar, in einer Zeile). + +Wenn die Aufgabe einfach ist (Definition, Wissensfrage aus eigenem Wissen, kurze Meinung, +Wiedergabe von vorhandenem Kontext), erledige sie SELBST mit shouldRespond=true und needsAgent=false.""" # Append user-configured instructions if provided if userSystemPrompt and userSystemPrompt.strip(): @@ -546,7 +567,9 @@ WICHTIG: Antworte IMMER als valides JSON in exakt diesem Format: "responseChannels": optional - ["voice"], ["chat"] oder ["voice","chat"] je nach User-Anfrage, "reasoning": "Kurze Begruendung deiner Entscheidung", "detectedIntent": "addressed" | "question" | "proactive" | "stop" | "none", - "commands": [] oder null + "commands": [] oder null, + "needsAgent": false (true nur bei komplexen Aufgaben gemaess Eskalations-Regeln), + "agentReason": null (oder kurze Aufgabenbeschreibung wenn needsAgent=true) }} detectedIntent-Werte: diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py index 92615062..b5b5b754 100644 --- a/modules/workflows/automation2/executionEngine.py +++ b/modules/workflows/automation2/executionEngine.py @@ -77,7 +77,11 @@ def _outputSchemaForNode(nodeType: str) -> Optional[str]: if isinstance(ports, dict): p0 = ports.get(0) or ports.get("0") if isinstance(p0, dict): - return p0.get("schema") + spec = p0.get("schema") + if isinstance(spec, dict) and spec.get("kind") == "fromGraph": + return "FormPayload" + if isinstance(spec, str): + return spec return None @@ -329,6 +333,15 @@ async def executeGraph( ) from modules.workflows.processing.shared.methodDiscovery import discoverMethods discoverMethods(services) + from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs + from modules.workflows.automation2.featureInstanceRefMigration import ( + materializeFeatureInstanceRefs, + ) + + # Phase-5 Schicht-4: typed-ref envelopes are materialized FIRST so the + # subsequent connection-ref pass and validation see the canonical shape. + graph = materializeFeatureInstanceRefs(graph) + graph = materializeConnectionRefs(graph) nodeTypeIds = _getNodeTypeIds(services) logger.debug("executeGraph nodeTypeIds (%d): %s", len(nodeTypeIds), sorted(nodeTypeIds)) errors = validateGraph(graph, nodeTypeIds) diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py index 31cfc39c..d9fc99a7 100644 --- a/modules/workflows/automation2/executors/actionNodeExecutor.py +++ b/modules/workflows/automation2/executors/actionNodeExecutor.py @@ -1,19 +1,17 @@ # Copyright (c) 2025 Patrick Motsch # Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions. # -# Typed Port System: no heuristic merging. Uses INPUT_EXTRACTORS for wire-handover, -# DataRef for explicit parameter mapping, and _normalizeToSchema for output normalization. +# Typed Port System: explicit DataRefs / static parameters only (no runtime wire-handover). +# ``materializeConnectionRefs`` (see pickNotPushMigration) may still rewrite empty connectionReference at run start. import json import logging import re -from typing import Dict, Any, Optional +from typing import Any, Dict, Optional from modules.features.graphicalEditor.portTypes import ( - INPUT_EXTRACTORS, - _normalizeToSchema, _normalizeError, - _unwrapTransit, + _normalizeToSchema, ) from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError @@ -119,6 +117,63 @@ def _buildSearchQuery( return " ".join(parts) if parts else "*" +def _buildConnectionRefDict(connRef: str, chatService, services) -> Optional[Dict[str, Any]]: + """ + Build {id, authority, label} for node outputs (no secrets). + connRef may be UUID or logical connection:authority:user. + """ + if not connRef or not isinstance(connRef, str): + return None + original_ref = connRef.strip() + ref = original_ref + if _isUserConnectionId(ref): + resolved = _resolveConnectionIdToReference(chatService, ref, services) + if resolved: + ref = resolved + if not ref.startswith("connection:"): + return None + parts = ref.split(":", 2) + authority = parts[1] if len(parts) > 1 else "" + user = parts[2] if len(parts) > 2 else "" + label = ref + conn_id = "" + if chatService: + try: + for c in chatService.getUserConnections() or []: + conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {}) + aid = conn.get("authority", "") + if hasattr(aid, "value"): + aid = aid.value + un = conn.get("externalUsername", "") or conn.get("externalId", "") or "" + logical = f"connection:{aid}:{un}" + if logical == ref or str(conn.get("id")) == original_ref: + conn_id = str(conn.get("id", "") or "") + break + except Exception as e: + logger.debug("_buildConnectionRefDict: getUserConnections: %s", e) + return {"id": conn_id, "authority": authority, "label": label or f"{authority}:{user}"} + + +def _attachConnectionProvenance( + out: Dict[str, Any], + resolvedParams: Dict[str, Any], + outputSchema: str, + chatService, + services, +) -> None: + """Mutates out to include connection provenance for typed list/draft outputs.""" + if out.get("connection"): + return + cref = resolvedParams.get("connectionReference") + if not cref: + return + if outputSchema not in ("FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"): + return + payload = _buildConnectionRefDict(str(cref), chatService, services) + if payload: + out["connection"] = payload + + def _resolveConnectionParam(params: Dict, chatService, services) -> None: """Resolve connectionReference if it looks like a UUID (UserConnection.id).""" connRef = params.get("connectionReference") @@ -157,45 +212,6 @@ def _applyEmailSearchQuery(params: Dict) -> None: params.pop(k, None) -def _wireHandover(nodeDef: Dict, inputSources: Dict, nodeOutputs: Dict, params: Dict) -> None: - """Apply wire-handover: extract fields from upstream using INPUT_EXTRACTORS.""" - if 0 not in inputSources: - logger.debug("_wireHandover: no port 0 in inputSources=%s", inputSources) - return - srcId, _ = inputSources[0] - upstream = nodeOutputs.get(srcId) - if not upstream or not isinstance(upstream, dict): - logger.debug("_wireHandover: upstream for %s is missing or not dict: %s", srcId, type(upstream)) - return - - data = _unwrapTransit(upstream) - if not isinstance(data, dict): - logger.debug("_wireHandover: unwrapped data is not dict: %s", type(data)) - return - - inputPorts = nodeDef.get("inputPorts", {}) - port0 = inputPorts.get(0, {}) - accepts = port0.get("accepts", []) - logger.debug("_wireHandover: srcId=%s accepts=%s upstream_keys=%s params_keys_before=%s", srcId, accepts, list(data.keys()), list(params.keys())) - - for schemaName in accepts: - if schemaName == "Transit": - continue - extractor = INPUT_EXTRACTORS.get(schemaName) - if extractor: - extracted = extractor(data) - logger.debug("_wireHandover: extractor %s returned keys=%s", schemaName, list(extracted.keys()) if extracted else None) - if extracted: - for k, v in extracted.items(): - existing = params.get(k) - if not existing: - params[k] = v - logger.debug("_wireHandover: set %s (was empty/missing) type=%s len=%s", k, type(v).__name__, len(v) if isinstance(v, (list, str, dict)) else "n/a") - else: - logger.debug("_wireHandover: skip %s (already has value, type=%s)", k, type(existing).__name__) - return - - def _getOutputSchemaName(nodeDef: Dict) -> str: """Get the output schema name from the node definition.""" outputPorts = nodeDef.get("outputPorts", {}) @@ -238,22 +254,17 @@ class ActionNodeExecutor: resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {})) logger.debug("ActionNodeExecutor node %s resolved params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__) - # 2. Wire-handover via extractors (fills missing params from upstream) - inputSources = context.get("inputSources", {}).get(nodeId, {}) - _wireHandover(nodeDef, inputSources, context.get("nodeOutputs", {}), resolvedParams) - logger.debug("ActionNodeExecutor node %s after wireHandover: params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__) - - # 3. Apply defaults from parameter definitions + # 2. Apply defaults from parameter definitions for pDef in nodeDef.get("parameters", []): pName = pDef.get("name") if pName and pName not in resolvedParams and "default" in pDef: resolvedParams[pName] = pDef["default"] - # 4. Resolve connectionReference + # 3. Resolve connectionReference chatService = getattr(self.services, "chat", None) _resolveConnectionParam(resolvedParams, chatService, self.services) - # 5. Node-type-specific param transformations + # 4. Node-type-specific param transformations if nodeType == "email.checkEmail": _applyEmailCheckFilter(resolvedParams) elif nodeType == "email.searchEmail": @@ -262,7 +273,7 @@ class ActionNodeExecutor: from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries merge_clickup_task_update_entries(resolvedParams) - # 6. email.checkEmail pause for email wait + # 5. email.checkEmail pause for email wait if nodeType == "email.checkEmail": runId = context.get("_runId") workflowId = context.get("workflowId") @@ -277,12 +288,12 @@ class ActionNodeExecutor: } raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig) - # 7. AI nodes: normalize legacy "prompt" -> "aiPrompt" + # 6. AI nodes: normalize legacy "prompt" -> "aiPrompt" if nodeType == "ai.prompt": if "aiPrompt" not in resolvedParams and "prompt" in resolvedParams: resolvedParams["aiPrompt"] = resolvedParams.pop("prompt") - # 8. Build context for email.draftEmail from subject + body + # 7. Build context for email.draftEmail from subject + body if nodeType == "email.draftEmail": subject = resolvedParams.get("subject", "") body = resolvedParams.get("body", "") @@ -296,7 +307,7 @@ class ActionNodeExecutor: resolvedParams.pop("subject", None) resolvedParams.pop("body", None) - # 9. Execute action + # 8. Execute action logger.info("ActionNodeExecutor node %s calling %s.%s with %d params", nodeId, methodName, actionName, len(resolvedParams)) try: executor = ActionExecutor(self.services) @@ -307,7 +318,7 @@ class ActionNodeExecutor: logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e) return _normalizeError(e, outputSchema) - # 10. Persist generated documents as files and build JSON-safe output + # 9. Persist generated documents as files and build JSON-safe output docsList = [] for d in (result.documents or []): dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d @@ -360,7 +371,6 @@ class ActionNodeExecutor: "success": result.success, "error": result.error, "documents": docsList, - "documentList": docsList, "data": dataField, } @@ -396,6 +406,8 @@ class ActionNodeExecutor: "mode": data_dict.get("mode", resolvedParams.get("mode", "summarize")), "count": int(data_dict.get("count", 0)), } + _attachConnectionProvenance(cr_out, resolvedParams, outputSchema, chatService, self.services) return _normalizeToSchema(cr_out, outputSchema) + _attachConnectionProvenance(out, resolvedParams, outputSchema, chatService, self.services) return _normalizeToSchema(out, outputSchema) diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py index 26334dd0..5a33f9e2 100644 --- a/modules/workflows/automation2/executors/dataExecutor.py +++ b/modules/workflows/automation2/executors/dataExecutor.py @@ -1,5 +1,5 @@ # Copyright (c) 2025 Patrick Motsch -# Data manipulation node executor: data.aggregate, data.transform, data.filter. +# Data manipulation node executor: data.aggregate, data.filter, data.consolidate. import logging from typing import Any, Dict @@ -10,7 +10,7 @@ logger = logging.getLogger(__name__) class DataExecutor: - """Execute data.aggregate, data.transform, data.filter nodes.""" + """Execute data.aggregate, data.filter, data.consolidate nodes.""" async def execute( self, @@ -26,8 +26,6 @@ class DataExecutor: if nodeType == "data.aggregate": return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context) - if nodeType == "data.transform": - return await self._transform(node, nodeOutputs, nodeId, inputSources) if nodeType == "data.filter": return await self._filter(node, nodeOutputs, nodeId, inputSources) if nodeType == "data.consolidate": @@ -70,41 +68,6 @@ class DataExecutor: return {"items": items, "count": len(items), "_success": True} - async def _transform( - self, - node: Dict, - nodeOutputs: Dict, - nodeId: str, - inputSources: Dict, - ) -> Any: - """Apply mappings to restructure data.""" - from modules.workflows.automation2.graphUtils import resolveParameterReferences - - inp = self._getInput(inputSources, nodeOutputs) - data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp - mappings = (node.get("parameters") or {}).get("mappings", []) - - result = {} - for mapping in mappings: - if not isinstance(mapping, dict): - continue - outputField = mapping.get("outputField") - if not outputField: - continue - source = mapping.get("source") - if source and isinstance(source, dict) and source.get("type") == "ref": - resolved = resolveParameterReferences(source, nodeOutputs) - result[outputField] = resolved - elif source and isinstance(source, dict) and source.get("type") == "value": - result[outputField] = source.get("value") - elif isinstance(data, dict) and mapping.get("sourceField"): - result[outputField] = data.get(mapping["sourceField"]) - else: - result[outputField] = source - - result["_success"] = True - return result - async def _filter( self, node: Dict, diff --git a/modules/workflows/automation2/featureInstanceRefMigration.py b/modules/workflows/automation2/featureInstanceRefMigration.py new file mode 100644 index 00000000..b4fba529 --- /dev/null +++ b/modules/workflows/automation2/featureInstanceRefMigration.py @@ -0,0 +1,159 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Phase-5 Schicht-4 migration: convert raw ``featureInstanceId: ""`` workflow +parameters into typed ``FeatureInstanceRef`` envelopes on disk. + +Why +--- +The Typed Action Architecture (see +``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``) declares +``featureInstanceId`` as ``FeatureInstanceRef`` (a catalog-typed reference with +a ``featureCode`` discriminator). Older workflows persist this parameter as a +plain UUID string, which carries no type information and forces every action / +adapter to re-derive the feature code from the node type. + +What this module does +--------------------- +``materializeFeatureInstanceRefs(graph)`` walks every node, and whenever a +node parameter named ``featureInstanceId`` is a non-empty string (raw UUID), +it rewrites the value to a typed envelope:: + + {"$type": "FeatureInstanceRef", + "id": "", + "featureCode": ""} + +The runtime resolver (``graphUtils._unwrapTypedRef``) automatically unwraps +that envelope back to the canonical primitive (``id``) when feeding action +implementations, so legacy action code keeps working unchanged. + +Idempotent +---------- +Already-migrated values (already-envelope dicts, empty strings, ``None``) are +left untouched. Running the migration twice is a no-op. + +Out of scope +------------ +The runtime helper ``pickNotPushMigration.materializeConnectionRefs`` solves a +related but different problem (resolving empty ``connectionReference`` to +upstream DataRefs at run-start). Both helpers compose: the typical +``executeGraph`` pipeline is + + raw graph + -> materializeFeatureInstanceRefs (this module, on save / on load) + -> materializeConnectionRefs (pickNotPushMigration, at run-start) + -> ActionNodeExecutor / ActionExecutor +""" +from __future__ import annotations + +import copy +import logging +from typing import Any, Dict, Optional + +logger = logging.getLogger(__name__) + + +# Single source of truth for node-type → feature code mapping. Keep in sync +# with the method registry; values must be the same string the FeatureInstance +# row uses for its ``featureCode`` column. +_NODE_TYPE_PREFIX_TO_FEATURE_CODE: Dict[str, str] = { + "trustee": "trustee", + "redmine": "redmine", + "clickup": "clickup", + "sharepoint": "sharepoint", + "outlook": "outlook", + "email": "outlook", + "teamsbot": "teamsbot", + "ai": "ai", +} + + +def _deriveFeatureCode(nodeType: str) -> Optional[str]: + """Best-effort feature-code derivation from a node type id. + + Returns ``None`` if the prefix is not in the registry — the migration then + omits ``featureCode`` from the envelope rather than guessing wrongly. + """ + if not nodeType or not isinstance(nodeType, str): + return None + prefix = nodeType.split(".", 1)[0].strip().lower() + return _NODE_TYPE_PREFIX_TO_FEATURE_CODE.get(prefix) + + +def _isAlreadyTypedEnvelope(value: Any) -> bool: + return ( + isinstance(value, dict) + and value.get("$type") == "FeatureInstanceRef" + and isinstance(value.get("id"), str) + ) + + +def _isMigratableUuidValue(value: Any) -> bool: + """A bare non-empty string is treated as a UUID candidate worth migrating. + + We deliberately do NOT enforce a strict UUID regex — historically + workflows have been seen with non-UUID instance ids (e.g. demo seeds). + The migration converts whatever string is there; downstream code already + treats the value as opaque. + """ + return isinstance(value, str) and value.strip() != "" + + +def _buildEnvelope(uuidValue: str, nodeType: str) -> Dict[str, Any]: + envelope: Dict[str, Any] = { + "$type": "FeatureInstanceRef", + "id": uuidValue.strip(), + } + code = _deriveFeatureCode(nodeType) + if code: + envelope["featureCode"] = code + return envelope + + +def materializeFeatureInstanceRefs(graph: Dict[str, Any]) -> Dict[str, Any]: + """Return a deep-copied graph with raw ``featureInstanceId`` strings rewritten + to typed ``FeatureInstanceRef`` envelopes. + + The function never mutates its input. It is safe to call repeatedly + (idempotent) and on partial graphs (missing nodes, missing parameters). + """ + if not isinstance(graph, dict): + return graph + + out = copy.deepcopy(graph) + nodes = out.get("nodes") + if not isinstance(nodes, list): + return out + + migratedCount = 0 + for node in nodes: + if not isinstance(node, dict): + continue + params = node.get("parameters") + if not isinstance(params, dict): + continue + current = params.get("featureInstanceId") + if current is None: + continue + if _isAlreadyTypedEnvelope(current): + continue + if not _isMigratableUuidValue(current): + continue + envelope = _buildEnvelope(current, node.get("type") or "") + params["featureInstanceId"] = envelope + migratedCount += 1 + logger.debug( + "materializeFeatureInstanceRefs: node %s (%s) -> envelope %r", + node.get("id"), + node.get("type"), + envelope, + ) + + if migratedCount: + logger.info( + "materializeFeatureInstanceRefs: migrated %d featureInstanceId value(s)", + migratedCount, + ) + return out + + +__all__ = ["materializeFeatureInstanceRefs"] diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py index 1cd2dc3e..1f01a57d 100644 --- a/modules/workflows/automation2/graphUtils.py +++ b/modules/workflows/automation2/graphUtils.py @@ -2,7 +2,7 @@ # Graph parsing, validation, and topological sort for automation2. import logging -from typing import Dict, List, Any, Tuple, Set +from typing import Dict, List, Any, Tuple, Set, Optional logger = logging.getLogger(__name__) @@ -113,10 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]: if nid not in nodeIds: errors.append(f"Connection references non-existent node {nid}") - # Soft port compatibility check (warnings, not errors) - warnings = _checkPortCompatibility(nodes, connMap) - if warnings: - logger.info("validateGraph port warnings: %s", warnings) + # Port compatibility: hard-fail (Pick-not-Push typed graph) + port_errors = _checkPortCompatibility(nodes, connMap) + if port_errors: + logger.warning("validateGraph port mismatches: %s", port_errors) + errors.extend(port_errors) if errors: logger.debug("validateGraph errors: %s", errors) @@ -125,19 +126,35 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]: return errors +def parse_graph_defined_schema(node: Dict[str, Any], parameter_key: str) -> Optional[Dict[str, Any]]: + """ + Build a JSON-serializable port schema dict from graph parameters (e.g. form ``fields``). + Used by tooling and future API surfaces; mirrors ``parse_graph_defined_output_schema`` logic. + """ + from modules.features.graphicalEditor.portTypes import _derive_form_payload_schema_from_param + + sch = _derive_form_payload_schema_from_param(node, parameter_key) + if sch is None: + return None + return { + "name": sch.name, + "fields": [f.model_dump() for f in sch.fields], + } + + def _checkPortCompatibility( nodes: List[Dict], connMap: Dict[str, List[Tuple[str, int, int]]], ) -> List[str]: """ - Soft check: warn if connected port types are incompatible. - Returns warnings (never blocks execution). + Hard typed-port check: incompatible connections become validation errors. """ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES + from modules.features.graphicalEditor.portTypes import resolve_output_schema_name nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES} nodeById = {n["id"]: n for n in nodes if n.get("id")} - warnings = [] + warnings: List[str] = [] for tgt, pairs in connMap.items(): tgtNode = nodeById.get(tgt) @@ -156,20 +173,28 @@ def _checkPortCompatibility( if not srcDef: continue srcOutputPorts = srcDef.get("outputPorts", {}) - srcPort = srcOutputPorts.get(srcOut, {}) - tgtPort = tgtInputPorts.get(tgtIn, {}) + srcPort = srcOutputPorts.get(srcOut, {}) or {} + tgtPort = tgtInputPorts.get(tgtIn, {}) or {} - srcSchema = srcPort.get("schema", "") + if not isinstance(srcPort, dict): + continue + src_schema = resolve_output_schema_name(srcNode, srcPort) accepts = tgtPort.get("accepts", []) - if not accepts or not srcSchema: + if not accepts or not src_schema: continue - if "Transit" in accepts: + if src_schema in accepts: continue - if srcSchema not in accepts: - warnings.append( - f"Port mismatch: {src}[out:{srcOut}] ({srcSchema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})" - ) + # Port that only declares Transit behaves as an untyped sink (legacy graphs). + if len(accepts) == 1 and accepts[0] == "Transit": + continue + if src_schema == "FormPayload_dynamic" and "FormPayload" in accepts: + continue + if src_schema.startswith("FormPayload") and "FormPayload" in accepts: + continue + warnings.append( + f"Port mismatch: {src}[out:{srcOut}] ({src_schema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})" + ) return warnings @@ -217,12 +242,35 @@ def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, in return order +_WILDCARD_SEGMENT = "*" + + def _get_by_path(data: Any, path: List[Any]) -> Any: - """Traverse data by path (strings and ints); return None if not found.""" + """Traverse data by path (strings and ints); return None if not found. + + Supports the iteration wildcard ``"*"`` as a path segment: when applied + to a list, the remainder of the path is mapped over each element and the + results are returned as a list (drops elements that resolve to ``None``). + This is the "typed Bindings-Resolver" iteration primitive defined for + Schicht 4 of the Typed Action Architecture. + """ current = data - for seg in path: + for i, seg in enumerate(path): if current is None: return None + if isinstance(seg, str) and seg == _WILDCARD_SEGMENT: + if not isinstance(current, (list, tuple)): + return None + tail = list(path[i + 1 :]) + if not tail: + return list(current) + mapped: List[Any] = [] + for item in current: + resolved = _get_by_path(item, tail) + if resolved is None: + continue + mapped.append(resolved) + return mapped if isinstance(current, dict) and isinstance(seg, str) and seg in current: current = current[seg] elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)): @@ -236,6 +284,52 @@ def _get_by_path(data: Any, path: List[Any]) -> Any: return current +def _pathContainsWildcard(path: List[Any]) -> bool: + """True if any segment is the iteration wildcard ``"*"``.""" + return any(isinstance(seg, str) and seg == _WILDCARD_SEGMENT for seg in path) + + +# --------------------------------------------------------------------------- +# Phase-5 Schicht-4 — Typed-Ref envelope unwrap +# --------------------------------------------------------------------------- +# +# Workflow params can carry a typed-ref envelope like +# ``{"$type": "FeatureInstanceRef", "id": "", "featureCode": "trustee"}``. +# Action implementations historically receive the canonical primitive (the +# referenced ``id``) as a string. ``_unwrapTypedRef`` extracts that primitive +# without losing the typed envelope shape on disk — the migration script +# (``featureInstanceRefMigration.materializeFeatureInstanceRefs``) writes the +# envelope, the resolver unwraps it on its way to the action. + +_TYPED_REF_PRIMARY_FIELD = { + "FeatureInstanceRef": "id", + "ConnectionRef": "id", + "PromptTemplateRef": "id", + "ClickUpListRef": "listId", + "SharePointFileRef": "filePath", + "SharePointFolderRef": "folderPath", +} + + +def _isTypedRefEnvelope(value: Any) -> bool: + """True if ``value`` looks like a typed-ref envelope ({\"$type\": \"\", ...}).""" + if not isinstance(value, dict): + return False + typeName = value.get("$type") + return isinstance(typeName, str) and typeName in _TYPED_REF_PRIMARY_FIELD + + +def _unwrapTypedRef(value: Any) -> Any: + """If ``value`` is a typed-ref envelope, return its primary primitive. + + Falls back to the original value for unknown / non-envelope inputs. + """ + if not _isTypedRefEnvelope(value): + return value + primary = _TYPED_REF_PRIMARY_FIELD[value["$type"]] + return value.get(primary, value) + + def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: """ Resolve parameter references: @@ -247,6 +341,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: import re if isinstance(value, dict): + # Phase-5 Schicht-4: typed-ref envelopes (FeatureInstanceRef etc.) on + # disk get unwrapped to their canonical primitive (e.g. ``id``) so + # legacy action signatures keep working. See ``_unwrapTypedRef``. + if _isTypedRefEnvelope(value): + return _unwrapTypedRef(value) if value.get("type") == "ref": node_id = value.get("nodeId") path = value.get("path") diff --git a/modules/workflows/automation2/pickNotPushMigration.py b/modules/workflows/automation2/pickNotPushMigration.py new file mode 100644 index 00000000..fe347761 --- /dev/null +++ b/modules/workflows/automation2/pickNotPushMigration.py @@ -0,0 +1,83 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Graph helpers for Pick-not-Push: materialize connectionReference as explicit DataRefs. + +Runtime: executeGraph deep-copies the version graph and applies materialize_connection_refs +so downstream nodes resolve connection UUIDs from upstream output.connection.id. +""" +from __future__ import annotations + +import copy +import logging +from typing import Any, Dict, List + +from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES +from modules.features.graphicalEditor.portTypes import resolve_output_schema_name +from modules.workflows.automation2.graphUtils import buildConnectionMap, getInputSources + +logger = logging.getLogger(__name__) + +_NODE_DEF_BY_ID = {n["id"]: n for n in STATIC_NODE_TYPES} + +_SCHEMAS_WITH_CONNECTION = frozenset( + {"FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"}, +) + + +def _data_ref(node_id: str, path: List[Any]) -> Dict[str, Any]: + return {"type": "ref", "nodeId": node_id, "path": list(path)} + + +def materializeConnectionRefs(graph: Dict[str, Any]) -> Dict[str, Any]: + """ + Deep-copy graph and set empty connectionReference (userConnection params) to + DataRef { nodeId: upstreamPort0, path: ['connection','id'] } when upstream + output schema carries connection provenance. + """ + g = copy.deepcopy(graph) + nodes: List[Dict[str, Any]] = g.get("nodes") or [] + connections = g.get("connections") or [] + if not nodes: + return g + + conn_map = buildConnectionMap(connections) + node_by_id = {n["id"]: n for n in nodes if n.get("id")} + + for node in nodes: + nid = node.get("id") + ntype = node.get("type") + if not nid or not ntype: + continue + node_def = _NODE_DEF_BY_ID.get(ntype) + if not node_def: + continue + pdefs = node_def.get("parameters") or [] + has_conn = any( + p.get("name") == "connectionReference" and p.get("frontendType") == "userConnection" + for p in pdefs + ) + if not has_conn: + continue + params = node.get("parameters") + if not isinstance(params, dict): + node["parameters"] = {} + params = node["parameters"] + cur = params.get("connectionReference") + if cur not in (None, "", {}): + continue + input_sources = getInputSources(nid, conn_map) + if 0 not in input_sources: + continue + src_id, _ = input_sources[0] + src_node = node_by_id.get(src_id) or {} + src_def = _NODE_DEF_BY_ID.get(src_node.get("type") or "") + if not src_def: + continue + out_port = (src_def.get("outputPorts") or {}).get(0, {}) or {} + out_schema = resolve_output_schema_name(src_node, out_port if isinstance(out_port, dict) else {}) + if out_schema not in _SCHEMAS_WITH_CONNECTION: + continue + params["connectionReference"] = _data_ref(src_id, ["connection", "id"]) + logger.debug("materializeConnectionRefs: %s.connectionReference -> ref %s.connection.id", nid, src_id) + + return g diff --git a/modules/workflows/automation2/udmUpstreamShapes.py b/modules/workflows/automation2/udmUpstreamShapes.py new file mode 100644 index 00000000..33dea176 --- /dev/null +++ b/modules/workflows/automation2/udmUpstreamShapes.py @@ -0,0 +1,36 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Pure shape coercion for UDM-related upstream payloads (tests + optional tooling). + +No runtime wire-handover — kept only so unit tests can assert stable normalisation rules. +""" +from __future__ import annotations + +from typing import Any, Dict + + +def _coerceUdmDocumentInput(upstream: Dict[str, Any]) -> Dict[str, Any]: + if upstream.get("children") is not None and upstream.get("sourceType"): + return upstream + udm = upstream.get("udm") + if isinstance(udm, dict) and udm.get("children") is not None: + return udm + return {} + + +def _coerceUdmNodeListInput(upstream: Dict[str, Any]) -> Dict[str, Any]: + nodes = upstream.get("nodes") + if isinstance(nodes, list): + return {"nodes": nodes, "count": len(nodes)} + children = upstream.get("children") + if isinstance(children, list): + return {"nodes": children, "count": len(children)} + return {} + + +def _coerceConsolidateResultInput(upstream: Dict[str, Any]) -> Dict[str, Any]: + result: Dict[str, Any] = {} + for key in ("result", "mode", "count"): + if key in upstream: + result[key] = upstream[key] + return result diff --git a/modules/workflows/methods/_actionSignatureValidator.py b/modules/workflows/methods/_actionSignatureValidator.py new file mode 100644 index 00000000..942ccb8a --- /dev/null +++ b/modules/workflows/methods/_actionSignatureValidator.py @@ -0,0 +1,177 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Action signature validator for the Typed Action Architecture (Phase 2). + +Verifies that every WorkflowActionDefinition exposed by a Method: + 1. Declares a parameter `type` that is either a primitive or a known + PORT_TYPE_CATALOG schema name. + 2. Declares an `outputType` that exists in PORT_TYPE_CATALOG. + 3. Declares container types (`List[X]`, `Dict[K,V]`) whose element types + are also primitives or catalog schemas. + +Used at startup (and in CI tests) to prevent silent drift between +backend method signatures and the type catalog. + +Plan: wiki/c-work/1-plan/2026-04-typed-action-architecture.md +""" + +from __future__ import annotations + +import logging +from typing import Dict, Iterable, List, Optional + +from modules.datamodels.datamodelWorkflowActions import ( + WorkflowActionDefinition, + WorkflowActionParameter, +) +from modules.features.graphicalEditor.portTypes import ( + PORT_TYPE_CATALOG, + PRIMITIVE_TYPES, + _stripContainer, +) + +logger = logging.getLogger(__name__) + + +# Catalog types that are explicitly allowed as fire-and-forget outputs +# (no typed payload expected by downstream nodes). +_ALLOWED_GENERIC_OUTPUTS = frozenset({"ActionResult", "Transit"}) + + +def _isKnownType(typeName: str) -> bool: + """Primitive or catalog-resolvable type name.""" + return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG + + +def _validateTypeRef(typeStr: str) -> List[str]: + """ + Validate a single type reference string (the value of `type` on a + WorkflowActionParameter or `outputType` on a WorkflowActionDefinition). + + Returns a list of human-readable error fragments (empty if OK). + """ + if not typeStr or not isinstance(typeStr, str): + return ["empty/non-string type"] + + # Backwards-compatible aliases (lowercase Python builtins) + if typeStr in {"list", "dict"}: + return [ + f"'{typeStr}' is too generic — use 'List[X]' / 'Dict[K,V]' or a " + f"catalog schema name" + ] + + parts = _stripContainer(typeStr) + if not parts: + return [f"could not parse type '{typeStr}'"] + + errors: List[str] = [] + for part in parts: + if not _isKnownType(part): + errors.append( + f"unknown type '{part}' (not a primitive and not in catalog)" + ) + return errors + + +def _validateActionParameter( + actionId: str, + paramName: str, + param: WorkflowActionParameter, +) -> List[str]: + """Validate a single parameter; returns prefixed error messages.""" + out: List[str] = [] + for err in _validateTypeRef(param.type): + out.append(f"{actionId}.{paramName}: {err}") + return out + + +def _validateActionDefinition( + actionDef: WorkflowActionDefinition, +) -> List[str]: + """Validate parameters + outputType of one action.""" + errors: List[str] = [] + actionId = actionDef.actionId or "" + + for paramName, param in (actionDef.parameters or {}).items(): + errors.extend(_validateActionParameter(actionId, paramName, param)) + + outputType = actionDef.outputType + if outputType not in _ALLOWED_GENERIC_OUTPUTS: + for err in _validateTypeRef(outputType): + errors.append(f"{actionId}.: {err}") + + return errors + + +def _validateActionsDict( + methodName: str, + actions: Dict[str, WorkflowActionDefinition], +) -> List[str]: + """Validate every action in a Method's _actions dict.""" + errors: List[str] = [] + if not actions: + return errors + for localName, actionDef in actions.items(): + if not isinstance(actionDef, WorkflowActionDefinition): + errors.append( + f"{methodName}.{localName}: not a WorkflowActionDefinition instance" + ) + continue + errors.extend(_validateActionDefinition(actionDef)) + return errors + + +# --------------------------------------------------------------------------- +# Public entry points +# --------------------------------------------------------------------------- + +def _validateMethods(methodInstances: Iterable) -> List[str]: + """ + Validate a sequence of Method instances. + + Each instance is expected to expose `_actions` (Dict[str, WorkflowActionDefinition]). + """ + errors: List[str] = [] + for method in methodInstances: + methodName = getattr(method, "name", method.__class__.__name__) + actions = getattr(method, "_actions", None) or {} + errors.extend(_validateActionsDict(methodName, actions)) + return errors + + +def _formatValidationReport(errors: List[str]) -> str: + """Build a multi-line human-readable error report.""" + if not errors: + return "Action signatures are healthy." + lines = [f"Found {len(errors)} action-signature drift(s):"] + lines.extend(f" - {e}" for e in errors) + return "\n".join(lines) + + +def _logValidationReport(errors: List[str], strict: bool = False) -> None: + """ + Log validation results. + + If `strict=True`, raises RuntimeError on any error (use in tests / CI). + Otherwise emits warnings (use at startup so the app keeps running but + operators see the drift in the log). + """ + report = _formatValidationReport(errors) + if errors: + if strict: + raise RuntimeError(report) + logger.warning(report) + else: + logger.info(report) + + +__all__ = [ + "_validateMethods", + "_validateActionsDict", + "_validateActionDefinition", + "_validateActionParameter", + "_validateTypeRef", + "_formatValidationReport", + "_logValidationReport", +] diff --git a/modules/workflows/methods/methodAi/methodAi.py b/modules/workflows/methods/methodAi/methodAi.py index eac1babe..5265f5c9 100644 --- a/modules/workflows/methods/methodAi/methodAi.py +++ b/modules/workflows/methods/methodAi/methodAi.py @@ -39,17 +39,19 @@ class MethodAi(MethodBase): actionId="ai.process", description="Universal AI document processing action - accepts multiple input documents in any format and processes them together with a prompt. If the prompt specifies document formats to deliver, include them in the prompt", dynamicMode=True, + outputType="AiResult", parameters={ "aiPrompt": WorkflowActionParameter( name="aiPrompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=True, description="Instruction for the AI describing what processing to perform" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document reference(s) in any format to use as input/context" @@ -82,7 +84,7 @@ class MethodAi(MethodBase): ), "contentParts": WorkflowActionParameter( name="contentParts", - type="List[ContentPart]", + type="List[Any]", frontendType=FrontendType.HIDDEN, required=False, description="Pre-extracted content parts (internal parameter, typically passed between actions). If provided, these will be used instead of extracting from documentList. Can be a list of ContentPart objects or an object with a 'parts' attribute." @@ -94,10 +96,12 @@ class MethodAi(MethodBase): actionId="ai.webResearch", description="Web research with two-step process: search for URLs, then crawl content", dynamicMode=True, + outputType="AiResult", parameters={ "prompt": WorkflowActionParameter( name="prompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=True, description="Natural language research instruction" @@ -140,10 +144,11 @@ class MethodAi(MethodBase): actionId="ai.summarizeDocument", description="Summarize one or more documents, extracting key points and main ideas. If the prompt specifies document formats to deliver, include them in the prompt", dynamicMode=True, + outputType="DocumentList", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to summarize" @@ -180,10 +185,11 @@ class MethodAi(MethodBase): actionId="ai.translateDocument", description="Translate documents to a target language while preserving formatting and structure", dynamicMode=True, + outputType="DocumentList", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to translate" @@ -224,10 +230,11 @@ class MethodAi(MethodBase): actionId="ai.convertDocument", description="Convert documents between different formats (PDF→Word, Excel→CSV, etc.)", dynamicMode=True, + outputType="DocumentList", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to convert" @@ -255,17 +262,19 @@ class MethodAi(MethodBase): actionId="ai.generateDocument", description="Generate documents from scratch or based on templates/inputs. If the prompt specifies document formats to deliver, include them in the prompt", dynamicMode=True, + outputType="DocumentList", parameters={ "prompt": WorkflowActionParameter( name="prompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=True, description="Description of the document to generate" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Template documents or reference documents to use as a guide" @@ -293,17 +302,19 @@ class MethodAi(MethodBase): actionId="ai.generateCode", description="Generate one or multiple code files in a single action - explicitly sets intent to 'code'. This action can generate multiple files (e.g., config.json, customers.json, settings.json) when the prompt requests multiple files. If the prompt specifies file formats to deliver, include them in the prompt. IMPORTANT: When the user requests multiple files (e.g., 'generate 3 JSON files'), use a SINGLE ai.generateCode action with a prompt that describes ALL requested files, rather than splitting into multiple actions.", dynamicMode=True, + outputType="DocumentList", parameters={ "prompt": WorkflowActionParameter( name="prompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=True, description="Description of code to generate. If multiple files are requested, describe ALL files in this single prompt (e.g., 'Generate 3 JSON files: 1) config.json with..., 2) customers.json with..., 3) settings.json with...')." ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Reference documents" @@ -323,6 +334,7 @@ class MethodAi(MethodBase): actionId="ai.consolidate", description="AI-assisted consolidation of aggregated workflow results (summarize, classify, semantic merge)", dynamicMode=True, + outputType="ConsolidateResult", parameters={ "mode": WorkflowActionParameter( name="mode", @@ -336,6 +348,7 @@ class MethodAi(MethodBase): "prompt": WorkflowActionParameter( name="prompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="Optional extra instructions for the LLM", diff --git a/modules/workflows/methods/methodBase.py b/modules/workflows/methods/methodBase.py index 6a9f2956..02cae134 100644 --- a/modules/workflows/methods/methodBase.py +++ b/modules/workflows/methods/methodBase.py @@ -176,6 +176,7 @@ class MethodBase: 'default': param.default, 'frontendType': param.frontendType.value, 'frontendOptions': param.frontendOptions, + 'uiHint': param.uiHint, 'validation': param.validation } return result @@ -230,8 +231,18 @@ class MethodBase: return validated def _validateType(self, value: Any, expectedType: str) -> Any: - """Validate and convert value to expected type""" - # Type validation logic + """Validate and convert value to expected type. + + Catalog types (e.g. 'ConnectionRef', 'FeatureInstanceRef', + 'DocumentList', 'TrusteeProcessResult') pass through unchanged — + runtime structural validation is handled by the workflow engine / + port-schema layer, not at the action-call boundary. + """ + from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG + + if expectedType in PORT_TYPE_CATALOG: + return value + typeMap = { 'str': str, 'int': int, @@ -239,8 +250,12 @@ class MethodBase: 'bool': bool, 'list': list, 'dict': dict, + 'Any': lambda v: v, } - + + if expectedType == 'Any': + return value + # Handle List[str], List[int], etc. if expectedType.startswith('List['): if isinstance(value, str): diff --git a/modules/workflows/methods/methodChatbot/methodChatbot.py b/modules/workflows/methods/methodChatbot/methodChatbot.py index 4583e636..cc44428e 100644 --- a/modules/workflows/methods/methodChatbot/methodChatbot.py +++ b/modules/workflows/methods/methodChatbot/methodChatbot.py @@ -25,17 +25,19 @@ class MethodChatbot(MethodBase): actionId="chatbot.queryDatabase", description="Execute a SQL SELECT query via the preprocessor connector. Returns formatted query results.", dynamicMode=False, + outputType="QueryResult", parameters={ "sqlQuery": WorkflowActionParameter( name="sqlQuery", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="SQL SELECT query to execute. If not provided, will attempt to extract from analysis_result document in documentList." ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document reference(s) containing analysis_result with sqlQuery field. Used if sqlQuery parameter is not provided." diff --git a/modules/workflows/methods/methodClickup/methodClickup.py b/modules/workflows/methods/methodClickup/methodClickup.py index 05eba50d..17f42300 100644 --- a/modules/workflows/methods/methodClickup/methodClickup.py +++ b/modules/workflows/methods/methodClickup/methodClickup.py @@ -34,10 +34,11 @@ class MethodClickup(MethodBase): actionId="clickup.listTasks", description="List tasks in a ClickUp list (virtual path /team/{id}/list/{id})", dynamicMode=True, + outputType="TaskList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -72,10 +73,11 @@ class MethodClickup(MethodBase): actionId="clickup.listFields", description="List custom and built-in field definitions for a ClickUp list (names, types, ids)", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -101,10 +103,11 @@ class MethodClickup(MethodBase): actionId="clickup.searchTasks", description="Search tasks in a ClickUp workspace (team)", dynamicMode=True, + outputType="TaskList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -172,10 +175,11 @@ class MethodClickup(MethodBase): actionId="clickup.getTask", description="Get a single task by ID", dynamicMode=True, + outputType="TaskResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -201,10 +205,11 @@ class MethodClickup(MethodBase): actionId="clickup.createTask", description="Create a task in a list", dynamicMode=True, + outputType="TaskResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -300,10 +305,11 @@ class MethodClickup(MethodBase): actionId="clickup.updateTask", description="Update a task (JSON body per ClickUp API)", dynamicMode=True, + outputType="TaskResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", @@ -336,10 +342,11 @@ class MethodClickup(MethodBase): actionId="clickup.uploadAttachment", description="Upload a file attachment to a task", dynamicMode=True, + outputType="TaskAttachmentRef", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="ClickUp connection", diff --git a/modules/workflows/methods/methodContext/methodContext.py b/modules/workflows/methods/methodContext/methodContext.py index 61afaf2e..d5f62772 100644 --- a/modules/workflows/methods/methodContext/methodContext.py +++ b/modules/workflows/methods/methodContext/methodContext.py @@ -36,6 +36,7 @@ class MethodContext(MethodBase): actionId="context.getDocumentIndex", description="Generate a comprehensive index of all documents available in the current workflow", dynamicMode=True, + outputType="DocumentList", parameters={ "resultType": WorkflowActionParameter( name="resultType", @@ -53,17 +54,18 @@ class MethodContext(MethodBase): actionId="context.extractContent", description="Extract raw content parts from documents without AI processing. Returns ContentParts with different typeGroups (text, image, table, structure, container). Images are returned as base64 data, not as extracted text. Text content is extracted from text-based formats (PDF text layers, Word docs, etc.) but NOT from images (no OCR). Use this action to prepare documents for subsequent AI processing actions.", dynamicMode=True, + outputType="UdmDocument", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to extract content from" ), "extractionOptions": WorkflowActionParameter( name="extractionOptions", - type="dict", + type="Dict[str,Any]", frontendType=FrontendType.JSON, required=False, description="Extraction options (if not provided, defaults are used). Note: This action does NOT use AI - it performs pure content extraction. Images are preserved as base64 data, not converted to text." @@ -74,10 +76,11 @@ class MethodContext(MethodBase): "neutralizeData": WorkflowActionDefinition( actionId="context.neutralizeData", description="Neutralize extracted data from ContentExtracted documents (for use after extractContent)", + outputType="DocumentList", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) containing ContentExtracted objects to neutralize" @@ -88,6 +91,7 @@ class MethodContext(MethodBase): "triggerPreprocessingServer": WorkflowActionDefinition( actionId="context.triggerPreprocessingServer", description="Trigger preprocessing server at customer tenant to update database with configuration", + outputType="ActionResult", parameters={ "endpoint": WorkflowActionParameter( name="endpoint", diff --git a/modules/workflows/methods/methodFile/methodFile.py b/modules/workflows/methods/methodFile/methodFile.py index 072ca598..8724ab11 100644 --- a/modules/workflows/methods/methodFile/methodFile.py +++ b/modules/workflows/methods/methodFile/methodFile.py @@ -24,10 +24,11 @@ class MethodFile(MethodBase): actionId="file.create", description="Create a file from context (text/markdown from AI). Configurable format and style preset.", dynamicMode=True, + outputType="DocumentList", parameters={ "contentSources": WorkflowActionParameter( name="contentSources", - type="list", + type="List[str]", frontendType=FrontendType.HIDDEN, required=False, description="Array of context refs. Resolved and concatenated. Empty = from connected node.", diff --git a/modules/workflows/methods/methodJira/methodJira.py b/modules/workflows/methods/methodJira/methodJira.py index d7baacf0..0268d020 100644 --- a/modules/workflows/methods/methodJira/methodJira.py +++ b/modules/workflows/methods/methodJira/methodJira.py @@ -42,6 +42,7 @@ class MethodJira(MethodBase): "connectJira": WorkflowActionDefinition( actionId="jira.connectJira", description="Connect to JIRA instance and create ticket interface", + outputType="ActionResult", parameters={ "apiUsername": WorkflowActionParameter( name="apiUsername", @@ -81,6 +82,7 @@ class MethodJira(MethodBase): "taskSyncDefinition": WorkflowActionParameter( name="taskSyncDefinition", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=True, description="Field mapping definition as JSON string or dict" @@ -91,6 +93,7 @@ class MethodJira(MethodBase): "exportTicketsAsJson": WorkflowActionDefinition( actionId="jira.exportTicketsAsJson", description="Export tickets from JIRA as JSON list", + outputType="DocumentList", parameters={ "connectionId": WorkflowActionParameter( name="connectionId", @@ -112,6 +115,7 @@ class MethodJira(MethodBase): "importTicketsFromJson": WorkflowActionDefinition( actionId="jira.importTicketsFromJson", description="Import ticket data from JSON back to JIRA", + outputType="ActionResult", parameters={ "connectionId": WorkflowActionParameter( name="connectionId", @@ -122,7 +126,7 @@ class MethodJira(MethodBase): ), "ticketData": WorkflowActionParameter( name="ticketData", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing ticket data as JSON" @@ -140,17 +144,18 @@ class MethodJira(MethodBase): "mergeTicketData": WorkflowActionDefinition( actionId="jira.mergeTicketData", description="Merge JIRA export data with existing SharePoint data", + outputType="DocumentList", parameters={ "jiraData": WorkflowActionParameter( name="jiraData", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing JIRA ticket data as JSON array" ), "existingData": WorkflowActionParameter( name="existingData", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing existing SharePoint data as JSON array" @@ -176,10 +181,11 @@ class MethodJira(MethodBase): "parseCsvContent": WorkflowActionDefinition( actionId="jira.parseCsvContent", description="Parse CSV content with custom headers", + outputType="DocumentList", parameters={ "csvContent": WorkflowActionParameter( name="csvContent", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing CSV file content as bytes" @@ -207,10 +213,11 @@ class MethodJira(MethodBase): "parseExcelContent": WorkflowActionDefinition( actionId="jira.parseExcelContent", description="Parse Excel content with custom headers", + outputType="DocumentList", parameters={ "excelContent": WorkflowActionParameter( name="excelContent", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing Excel file content as bytes" @@ -238,17 +245,18 @@ class MethodJira(MethodBase): "createCsvContent": WorkflowActionDefinition( actionId="jira.createCsvContent", description="Create CSV content with custom headers", + outputType="DocumentList", parameters={ "data": WorkflowActionParameter( name="data", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing data as JSON (with data field from mergeTicketData)" ), "headers": WorkflowActionParameter( name="headers", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document reference containing headers JSON (from parseCsvContent/parseExcelContent)" @@ -273,17 +281,18 @@ class MethodJira(MethodBase): "createExcelContent": WorkflowActionDefinition( actionId="jira.createExcelContent", description="Create Excel content with custom headers", + outputType="DocumentList", parameters={ "data": WorkflowActionParameter( name="data", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference containing data as JSON (with data field from mergeTicketData)" ), "headers": WorkflowActionParameter( name="headers", - type="str", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document reference containing headers JSON (from parseExcelContent)" diff --git a/modules/workflows/methods/methodOutlook/methodOutlook.py b/modules/workflows/methods/methodOutlook/methodOutlook.py index 633f396d..4370b237 100644 --- a/modules/workflows/methods/methodOutlook/methodOutlook.py +++ b/modules/workflows/methods/methodOutlook/methodOutlook.py @@ -40,10 +40,11 @@ class MethodOutlook(MethodBase): actionId="outlook.readEmails", description="Read emails and metadata from a mailbox folder", dynamicMode=True, + outputType="EmailList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -89,10 +90,11 @@ class MethodOutlook(MethodBase): actionId="outlook.searchEmails", description="Search emails by query and return matching items with metadata", dynamicMode=True, + outputType="EmailList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -138,10 +140,11 @@ class MethodOutlook(MethodBase): actionId="outlook.composeAndDraftEmailWithContext", description="Compose email content using AI from context and optional documents, then create a draft", dynamicMode=True, + outputType="EmailDraft", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -156,20 +159,21 @@ class MethodOutlook(MethodBase): "context": WorkflowActionParameter( name="context", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="Detailed context for AI composition (omit when emailContent provided)" ), "emailContent": WorkflowActionParameter( name="emailContent", - type="dict", + type="Dict[str,Any]", frontendType=FrontendType.HIDDEN, required=False, description="Direct subject/body/to from upstream (skips AI composition)" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[Any]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document references or inline ActionDocuments for attachments" @@ -213,17 +217,18 @@ class MethodOutlook(MethodBase): actionId="outlook.sendDraftEmail", description="Send draft email(s) using draft email JSON document(s) from action outlook.composeAndDraftEmailWithContext", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to draft emails in JSON format (outputs from outlook.composeAndDraftEmailWithContext function)" diff --git a/modules/workflows/methods/methodRedmine/methodRedmine.py b/modules/workflows/methods/methodRedmine/methodRedmine.py index 0dd8f461..6c40c951 100644 --- a/modules/workflows/methods/methodRedmine/methodRedmine.py +++ b/modules/workflows/methods/methodRedmine/methodRedmine.py @@ -43,10 +43,11 @@ class MethodRedmine(MethodBase): actionId="redmine.readTicket", description="Read a single Redmine ticket from the local mirror by ticketId.", dynamicMode=False, + outputType="RedmineTicket", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "ticketId": WorkflowActionParameter( name="ticketId", type="int", frontendType=FrontendType.TEXT, @@ -59,13 +60,14 @@ class MethodRedmine(MethodBase): actionId="redmine.listTickets", description="List tickets from the mirror with optional filters (tracker, status, period, assignee).", dynamicMode=False, + outputType="RedmineTicketList", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "trackerIds": WorkflowActionParameter( - name="trackerIds", type="list", frontendType=FrontendType.JSON, + name="trackerIds", type="List[int]", frontendType=FrontendType.JSON, required=False, description="Restrict to these tracker ids (list of int or comma-separated string).", ), "status": WorkflowActionParameter( @@ -95,10 +97,11 @@ class MethodRedmine(MethodBase): actionId="redmine.createTicket", description="Create a new Redmine ticket. Requires subject and trackerId.", dynamicMode=False, + outputType="RedmineTicket", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "subject": WorkflowActionParameter( name="subject", type="str", frontendType=FrontendType.TEXT, @@ -109,7 +112,7 @@ class MethodRedmine(MethodBase): required=True, description="Tracker id (Userstory, Feature, Task ...).", ), "description": WorkflowActionParameter( - name="description", type="str", frontendType=FrontendType.TEXTAREA, + name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="Markdown/Textile description body.", ), "statusId": WorkflowActionParameter( @@ -133,7 +136,7 @@ class MethodRedmine(MethodBase): required=False, description="Target/fixed version id.", ), "customFields": WorkflowActionParameter( - name="customFields", type="dict", frontendType=FrontendType.JSON, + name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON, required=False, description="Custom fields as {customFieldId: value}.", ), }, @@ -143,10 +146,11 @@ class MethodRedmine(MethodBase): actionId="redmine.updateTicket", description="Update a Redmine ticket. Only provided fields are sent.", dynamicMode=False, + outputType="RedmineTicket", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "ticketId": WorkflowActionParameter( name="ticketId", type="int", frontendType=FrontendType.TEXT, @@ -157,7 +161,7 @@ class MethodRedmine(MethodBase): required=False, description="New title.", ), "description": WorkflowActionParameter( - name="description", type="str", frontendType=FrontendType.TEXTAREA, + name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="New description.", ), "trackerId": WorkflowActionParameter( @@ -185,11 +189,11 @@ class MethodRedmine(MethodBase): required=False, description="Change fixed version.", ), "notes": WorkflowActionParameter( - name="notes", type="str", frontendType=FrontendType.TEXTAREA, + name="notes", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="Journal entry (comment) added to the ticket.", ), "customFields": WorkflowActionParameter( - name="customFields", type="dict", frontendType=FrontendType.JSON, + name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON, required=False, description="Custom fields as {customFieldId: value}.", ), }, @@ -199,10 +203,11 @@ class MethodRedmine(MethodBase): actionId="redmine.getStats", description="Aggregated stats (KPIs, throughput, status distribution, backlog) from the mirror.", dynamicMode=False, + outputType="RedmineStats", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "dateFrom": WorkflowActionParameter( name="dateFrom", type="str", frontendType=FrontendType.TEXT, @@ -217,7 +222,7 @@ class MethodRedmine(MethodBase): required=False, description="'day' | 'week' | 'month' (default 'week').", ), "trackerIds": WorkflowActionParameter( - name="trackerIds", type="list", frontendType=FrontendType.JSON, + name="trackerIds", type="List[int]", frontendType=FrontendType.JSON, required=False, description="Restrict to these tracker ids.", ), }, @@ -227,10 +232,11 @@ class MethodRedmine(MethodBase): actionId="redmine.runSync", description="Sync Redmine tickets and relations into the local mirror (incremental by default).", dynamicMode=False, + outputType="ActionResult", parameters={ "featureInstanceId": WorkflowActionParameter( - name="featureInstanceId", type="str", frontendType=FrontendType.TEXT, - required=True, description="Redmine feature instance ID", + name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT, + required=True, description="Redmine feature instance", ), "force": WorkflowActionParameter( name="force", type="bool", frontendType=FrontendType.CHECKBOX, diff --git a/modules/workflows/methods/methodSharepoint/methodSharepoint.py b/modules/workflows/methods/methodSharepoint/methodSharepoint.py index 0fa0aca8..78e462d7 100644 --- a/modules/workflows/methods/methodSharepoint/methodSharepoint.py +++ b/modules/workflows/methods/methodSharepoint/methodSharepoint.py @@ -51,10 +51,11 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.findDocumentPath", description="Find documents and folders by name/path across sites", dynamicMode=True, + outputType="DocumentList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -89,17 +90,18 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.readDocuments", description="Read documents from SharePoint and extract content/metadata", dynamicMode=True, + outputType="DocumentList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document list reference(s) containing findDocumentPath result" @@ -126,17 +128,18 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.uploadDocument", description="Upload documents to SharePoint", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document reference(s) to upload. File names are taken from the documents" @@ -155,17 +158,18 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.listDocuments", description="List documents and folders in SharePoint paths across sites", dynamicMode=True, + outputType="FileList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=False, description="Document list reference(s) containing findDocumentPath result" @@ -192,17 +196,18 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.analyzeFolderUsage", description="Analyze usage intensity of folders and files in SharePoint", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" ), "documentList": WorkflowActionParameter( name="documentList", - type="List[str]", + type="DocumentList", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, description="Document list reference(s) containing findDocumentPath result" @@ -237,10 +242,11 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.findSiteByUrl", description="Find SharePoint site by hostname and site path", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -266,10 +272,11 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.downloadFileByPath", description="Download file from SharePoint by exact file path", dynamicMode=True, + outputType="DocumentList", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -302,10 +309,11 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.copyFile", description="Copy file within SharePoint", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" @@ -331,10 +339,11 @@ class MethodSharepoint(MethodBase): actionId="sharepoint.uploadFile", description="Upload raw file content (bytes) to SharePoint", dynamicMode=True, + outputType="ActionResult", parameters={ "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=True, description="Microsoft connection label" diff --git a/modules/workflows/methods/methodTrustee/actions/processDocuments.py b/modules/workflows/methods/methodTrustee/actions/processDocuments.py index 0d29c9bd..11e9aba1 100644 --- a/modules/workflows/methods/methodTrustee/actions/processDocuments.py +++ b/modules/workflows/methods/methodTrustee/actions/processDocuments.py @@ -2,10 +2,15 @@ # All rights reserved. """ Process extracted documents: create TrusteeDocument + TrusteePosition from extraction JSON. -Input: documentList (reference to extractFromFiles result). -Each document is JSON with documentType, extractedData, fileId, fileName. -extractedData is a list of expense/position records. -Output: one ActionDocument with JSON { positionIds, documentIds } for chaining to syncToAccounting. + +Input: documentList (DataRef on upstream DocumentList.documents — typically +trustee.extractFromFiles[documents]). Each item is an ActionDocument-dump dict +with `documentData` (JSON string) carrying { documentType, extractedData, fileId, +fileName }. extractedData is a list of expense/position records. + +Output: ActionResult with one ActionDocument containing JSON +{ positionIds, documentIds, autoMatchedPositionIds } for chaining to +syncToAccounting (via DataRef on documents[0]). """ import json diff --git a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py index 555a8623..b9c99f2c 100644 --- a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py +++ b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py @@ -2,8 +2,10 @@ # All rights reserved. """ Sync trustee positions to accounting (Buha). -Input: featureInstanceId, documentList (reference to processDocuments result message). -Reads positionIds from the document and calls AccountingBridge.pushBatchToAccounting. + +Input: featureInstanceId, documentList (DataRef on processDocuments[documents] — +list with one ActionDocument carrying JSON { positionIds, documentIds, ... }). +Reads positionIds from the first document and calls AccountingBridge.pushBatchToAccounting. """ import json diff --git a/modules/workflows/methods/methodTrustee/methodTrustee.py b/modules/workflows/methods/methodTrustee/methodTrustee.py index ceb5849f..73e7d573 100644 --- a/modules/workflows/methods/methodTrustee/methodTrustee.py +++ b/modules/workflows/methods/methodTrustee/methodTrustee.py @@ -31,17 +31,21 @@ class MethodTrustee(MethodBase): actionId="trustee.extractFromFiles", description="Extract document type and data from PDF/JPG (fileIds or SharePoint folder)", dynamicMode=False, + # Runtime returns ActionResult.isSuccess(documents=[...]); see + # actions/extractFromFiles.py. Keep this in sync with the + # graphical-editor adapter (nodeDefinitions/trustee.py). + outputType="ActionResult", parameters={ "fileIds": WorkflowActionParameter( name="fileIds", - type="list", + type="List[str]", frontendType=FrontendType.JSON, required=False, description="List of file IDs already in DB (alternative to connectionReference + sharepointFolder)", ), "connectionReference": WorkflowActionParameter( name="connectionReference", - type="str", + type="ConnectionRef", frontendType=FrontendType.USER_CONNECTION, required=False, description="Microsoft connection for SharePoint (use with sharepointFolder)", @@ -55,14 +59,15 @@ class MethodTrustee(MethodBase): ), "featureInstanceId": WorkflowActionParameter( name="featureInstanceId", - type="str", + type="FeatureInstanceRef", frontendType=FrontendType.TEXT, required=True, - description="Trustee feature instance ID", + description="Trustee feature instance", ), "prompt": WorkflowActionParameter( name="prompt", type="str", + uiHint="textarea", frontendType=FrontendType.TEXTAREA, required=False, description="AI prompt for extraction (optional)", @@ -74,20 +79,24 @@ class MethodTrustee(MethodBase): actionId="trustee.processDocuments", description="Create TrusteeDocument + TrusteePosition from extraction result (documentList from previous action)", dynamicMode=False, + # Runtime returns ActionResult.isSuccess(documents=[...]). + outputType="ActionResult", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="list", + # Concrete shape consumed by _resolveDocumentList (list + # of dicts with documentName/documentData/mimeType). + type="List[ActionDocument]", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, - description="Reference to extractFromFiles result (e.g. docList:messageId:extract_result)", + description="DataRef to upstream documents (e.g. trustee.extractFromFiles → documents)", ), "featureInstanceId": WorkflowActionParameter( name="featureInstanceId", - type="str", + type="FeatureInstanceRef", frontendType=FrontendType.TEXT, required=True, - description="Trustee feature instance ID", + description="Trustee feature instance", ), }, execute=processDocuments.__get__(self, self.__class__), @@ -96,20 +105,24 @@ class MethodTrustee(MethodBase): actionId="trustee.syncToAccounting", description="Push trustee positions to accounting (documentList = processDocuments result)", dynamicMode=False, + # Runtime returns ActionResult.isSuccess(documents=[...]). + outputType="ActionResult", parameters={ "documentList": WorkflowActionParameter( name="documentList", - type="list", + # Concrete shape consumed by syncToAccounting._resolveDocumentList: + # list of ActionDocument dicts produced by processDocuments. + type="List[ActionDocument]", frontendType=FrontendType.DOCUMENT_REFERENCE, required=True, - description="Reference to processDocuments result message", + description="DataRef to upstream documents (e.g. trustee.processDocuments → documents)", ), "featureInstanceId": WorkflowActionParameter( name="featureInstanceId", - type="str", + type="FeatureInstanceRef", frontendType=FrontendType.TEXT, required=True, - description="Trustee feature instance ID", + description="Trustee feature instance", ), }, execute=syncToAccounting.__get__(self, self.__class__), @@ -118,13 +131,14 @@ class MethodTrustee(MethodBase): actionId="trustee.refreshAccountingData", description="Import/refresh accounting data from external system (e.g. Abacus) into local tables. Checks cache freshness; use forceRefresh to re-import.", dynamicMode=True, + outputType="TrusteeRefreshResult", parameters={ "featureInstanceId": WorkflowActionParameter( name="featureInstanceId", - type="str", + type="FeatureInstanceRef", frontendType=FrontendType.TEXT, required=True, - description="Trustee feature instance ID", + description="Trustee feature instance", ), "forceRefresh": WorkflowActionParameter( name="forceRefresh", @@ -154,13 +168,14 @@ class MethodTrustee(MethodBase): actionId="trustee.queryData", description="Read data from the Trustee DB (lookup tenant+rent, raw recordset, or aggregate). Does NOT trigger an external sync.", dynamicMode=False, + outputType="QueryResult", parameters={ "featureInstanceId": WorkflowActionParameter( name="featureInstanceId", - type="str", + type="FeatureInstanceRef", frontendType=FrontendType.TEXT, required=True, - description="Trustee feature instance ID", + description="Trustee feature instance", ), "mode": WorkflowActionParameter( name="mode", diff --git a/scripts/_listMandates.py b/scripts/_listMandates.py new file mode 100644 index 00000000..cf3e9bd2 --- /dev/null +++ b/scripts/_listMandates.py @@ -0,0 +1,25 @@ +import sys +from pathlib import Path +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) +import psycopg2, psycopg2.extras +from modules.shared.configuration import APP_CONFIG + +c = psycopg2.connect( + host=APP_CONFIG.get('DB_HOST','localhost'), + user=APP_CONFIG.get('DB_USER'), + password=APP_CONFIG.get('DB_PASSWORD_SECRET'), + port=int(APP_CONFIG.get('DB_PORT',5432)), + dbname='poweron_app', +) +cur = c.cursor(cursor_factory=psycopg2.extras.RealDictCursor) +cur.execute('SELECT id, name, label, enabled, "deletedAt", "sysCreatedAt" FROM "Mandate" ORDER BY "sysCreatedAt"') +print("All Mandates in poweron_app:") +for r in cur.fetchall(): + print(f" id={r['id']} name={r['name']} label={r['label']} enabled={r['enabled']} deletedAt={r['deletedAt']}") + +cur.execute('SELECT COUNT(*) AS n FROM "FeatureInstance" WHERE "featureCode" = %s', ("redmine",)) +print(f"\nTotal redmine FeatureInstances in poweron_app: {cur.fetchone()['n']}") + +cur.execute('SELECT id, "mandateId", label, enabled FROM "FeatureInstance" WHERE "featureCode" = %s ORDER BY "sysCreatedAt"', ("redmine",)) +for r in cur.fetchall(): + print(f" fi={r['id']} mandate={r['mandateId']} label={r['label']} enabled={r['enabled']}") diff --git a/scripts/check_orphan_featureinstance.py b/scripts/check_orphan_featureinstance.py new file mode 100644 index 00000000..c09de61b --- /dev/null +++ b/scripts/check_orphan_featureinstance.py @@ -0,0 +1,97 @@ +"""Quick-Check: existiert FeatureInstance-Row 6019e7d0-b23d-41ec-b9f7-3dd1293078f2 +in poweron_app, und welche Mandate/Instances stehen mit dem RedmineTicketMirror in Verbindung? + +Aufruf: python gateway/scripts/check_orphan_featureinstance.py +""" +from __future__ import annotations + +import sys +from pathlib import Path + +_GATEWAY = Path(__file__).resolve().parents[1] +if str(_GATEWAY) not in sys.path: + sys.path.insert(0, str(_GATEWAY)) + +import psycopg2 +import psycopg2.extras + +from modules.shared.configuration import APP_CONFIG + + +_TARGET_FI = "6019e7d0-b23d-41ec-b9f7-3dd1293078f2" +_TARGET_MANDATE = "674b1bc0-1d01-4696-a094-3374c450f6e2" + + +def _connect(dbName: str): + return psycopg2.connect( + host=APP_CONFIG.get("DB_HOST", "localhost"), + user=APP_CONFIG.get("DB_USER"), + password=APP_CONFIG.get("DB_PASSWORD_SECRET"), + port=int(APP_CONFIG.get("DB_PORT", 5432)), + dbname=dbName, + ) + + +def main() -> int: + print(f"Checking FeatureInstance {_TARGET_FI} ...\n") + + with _connect("poweron_app") as appConn: + with appConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur: + cur.execute( + 'SELECT id, "mandateId", "featureCode", label, enabled, ' + '"sysCreatedAt", "sysModifiedAt" ' + 'FROM "FeatureInstance" WHERE id = %s', + (_TARGET_FI,), + ) + fi = cur.fetchone() + print(f"FeatureInstance row in poweron_app: {fi}\n") + + cur.execute( + 'SELECT id, "mandateId", "featureCode", label, enabled ' + 'FROM "FeatureInstance" ' + 'WHERE "mandateId" = %s AND "featureCode" = %s', + (_TARGET_MANDATE, "redmine"), + ) + sameMandateRedmine = cur.fetchall() + print( + f"All redmine FeatureInstances on mandate {_TARGET_MANDATE} " + f"({len(sameMandateRedmine)}):" + ) + for r in sameMandateRedmine: + print(f" {r}") + print() + + cur.execute( + 'SELECT id, name, label, enabled, "deletedAt", ' + '"sysCreatedAt", "sysModifiedAt" ' + 'FROM "Mandate" WHERE id = %s', + (_TARGET_MANDATE,), + ) + mandate = cur.fetchone() + print(f"Mandate row: {mandate}\n") + + with _connect("poweron_redmine") as rmConn: + with rmConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur: + cur.execute( + 'SELECT COUNT(*) AS n ' + 'FROM "RedmineTicketMirror" WHERE "featureInstanceId" = %s', + (_TARGET_FI,), + ) + n = cur.fetchone()["n"] + print(f"RedmineTicketMirror rows with featureInstanceId={_TARGET_FI}: {n}") + + cur.execute( + 'SELECT DISTINCT "featureInstanceId", "mandateId", COUNT(*) AS n ' + 'FROM "RedmineTicketMirror" ' + 'GROUP BY "featureInstanceId", "mandateId" ORDER BY n DESC LIMIT 20' + ) + distribution = cur.fetchall() + print(f"\nRedmineTicketMirror distribution (top 20):") + for r in distribution: + print(f" fi={r['featureInstanceId']} mandate={r['mandateId']} count={r['n']}") + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/script_migrate_feature_instance_refs.py b/scripts/script_migrate_feature_instance_refs.py new file mode 100644 index 00000000..40f723c1 --- /dev/null +++ b/scripts/script_migrate_feature_instance_refs.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Persistent DB migration: rewrite raw ``featureInstanceId`` UUIDs in stored +workflow graphs to typed ``FeatureInstanceRef`` envelopes. + +Why +--- +The runtime engine (``executeGraph``) already calls +``materializeFeatureInstanceRefs`` on every run, so legacy graphs *execute* +correctly today. The Editor however reads the persisted ``graph`` field +directly and shows whatever shape is on disk — until a workflow is saved +again it still displays the old plain-string format. + +What this script does +--------------------- +Walks every row of: + +* ``poweron_graphicaleditor.Automation2Workflow`` (legacy ``graph`` column) +* ``poweron_graphicaleditor.AutoVersion`` (canonical ``graph`` column) + +For each row, it: + +1. Loads the JSONB ``graph`` column. +2. Applies :func:`materializeFeatureInstanceRefs`. +3. Persists the result if (and only if) it differs from the input. + +Idempotent — re-runs are no-ops. + +Usage +----- +:: + + python scripts/script_migrate_feature_instance_refs.py --dry-run + python scripts/script_migrate_feature_instance_refs.py + +Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (Track C1). +""" +from __future__ import annotations + +import argparse +import json +import logging +import os +import sys +from pathlib import Path +from typing import Any, Dict, Iterable, List, Tuple + +_scriptPath = Path(__file__).resolve() +_gatewayPath = _scriptPath.parent.parent +sys.path.insert(0, str(_gatewayPath)) +os.chdir(str(_gatewayPath)) + +import psycopg2 # noqa: E402 +from psycopg2.extras import Json, RealDictCursor # noqa: E402 + +from modules.shared.configuration import APP_CONFIG # noqa: E402 +from modules.workflows.automation2.featureInstanceRefMigration import ( # noqa: E402 + materializeFeatureInstanceRefs, +) + +logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") +logger = logging.getLogger("script_migrate_feature_instance_refs") + + +_DB_NAME = "poweron_graphicaleditor" +_TABLES_AND_PK: List[Tuple[str, str]] = [ + ('"Automation2Workflow"', "id"), + ('"AutoVersion"', "id"), +] + + +def _connect() -> "psycopg2.extensions.connection": + cfg = { + "host": APP_CONFIG.get("DB_HOST", "localhost"), + "port": int(APP_CONFIG.get("DB_PORT", "5432")), + "user": APP_CONFIG.get("DB_USER"), + "password": ( + APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD") + ), + "database": _DB_NAME, + } + if not cfg["user"] or not cfg["password"]: + raise SystemExit("DB_USER and DB_PASSWORD/DB_PASSWORD_SECRET must be set") + return psycopg2.connect(**cfg) + + +def _loadGraph(value: Any) -> Dict[str, Any]: + """psycopg2 returns JSONB as a Python dict, but legacy data may be a JSON string.""" + if isinstance(value, dict): + return value + if isinstance(value, (bytes, bytearray)): + value = value.decode("utf-8", errors="replace") + if isinstance(value, str) and value.strip(): + try: + return json.loads(value) + except json.JSONDecodeError: + return {} + return {} + + +def _countMigrations(before: Dict[str, Any], after: Dict[str, Any]) -> int: + """Count how many ``featureInstanceId`` values were rewritten.""" + if before == after: + return 0 + bnodes = before.get("nodes") if isinstance(before, dict) else None + anodes = after.get("nodes") if isinstance(after, dict) else None + if not isinstance(bnodes, list) or not isinstance(anodes, list): + return 0 + count = 0 + for bn, an in zip(bnodes, anodes): + bp = (bn.get("parameters") or {}) if isinstance(bn, dict) else {} + ap = (an.get("parameters") or {}) if isinstance(an, dict) else {} + if bp.get("featureInstanceId") != ap.get("featureInstanceId"): + count += 1 + return count + + +def _migrateOneTable( + conn, + table: str, + pk: str, + *, + dryRun: bool, +) -> Dict[str, int]: + """Process one table; returns counts dict.""" + counts = {"scanned": 0, "rowsChanged": 0, "fieldsRewritten": 0} + with conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute(f'SELECT {pk} AS pk, "graph" AS graph FROM {table}') + rows: Iterable[Dict[str, Any]] = cur.fetchall() + for row in rows: + counts["scanned"] += 1 + before = _loadGraph(row.get("graph")) + if not before: + continue + after = materializeFeatureInstanceRefs(before) + if before == after: + continue + rewritten = _countMigrations(before, after) + if rewritten == 0: + continue + counts["rowsChanged"] += 1 + counts["fieldsRewritten"] += rewritten + logger.info( + "%s id=%s: %d featureInstanceId value(s) %s", + table, + row["pk"], + rewritten, + "would be migrated [dry-run]" if dryRun else "migrated", + ) + if not dryRun: + with conn.cursor() as updCur: + updCur.execute( + f'UPDATE {table} SET "graph" = %s WHERE {pk} = %s', + (Json(after), row["pk"]), + ) + if not dryRun: + conn.commit() + return counts + + +def migrate(dryRun: bool = False) -> Dict[str, Dict[str, int]]: + """Walk all tracked tables and migrate. Returns per-table counts.""" + summary: Dict[str, Dict[str, int]] = {} + conn = _connect() + try: + for table, pk in _TABLES_AND_PK: + summary[table] = _migrateOneTable(conn, table, pk, dryRun=dryRun) + finally: + conn.close() + return summary + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Persist materializeFeatureInstanceRefs into stored workflow graphs." + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Report what would be migrated without writing back.", + ) + args = parser.parse_args() + + logger.info( + "Starting featureInstanceRef DB migration (dry-run=%s, db=%s)", + args.dry_run, + _DB_NAME, + ) + summary = migrate(dryRun=args.dry_run) + totalRows = sum(s["rowsChanged"] for s in summary.values()) + totalFields = sum(s["fieldsRewritten"] for s in summary.values()) + for table, counts in summary.items(): + logger.info( + "%s: scanned=%d rowsChanged=%d fieldsRewritten=%d", + table, + counts["scanned"], + counts["rowsChanged"], + counts["fieldsRewritten"], + ) + logger.info( + "%s: %d row(s) %s, %d featureInstanceId value(s) total.", + "Dry-run summary" if args.dry_run else "Migration summary", + totalRows, + "would be updated" if args.dry_run else "updated", + totalFields, + ) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tests/integration/automation2/__init__.py b/tests/integration/automation2/__init__.py new file mode 100644 index 00000000..d30846a4 --- /dev/null +++ b/tests/integration/automation2/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2025 Patrick Motsch +"""Integration tests for automation2 typed bindings (Phase-5 Schicht-4).""" diff --git a/tests/integration/automation2/test_pick_not_push_migration_v2.py b/tests/integration/automation2/test_pick_not_push_migration_v2.py new file mode 100644 index 00000000..9b98e0ec --- /dev/null +++ b/tests/integration/automation2/test_pick_not_push_migration_v2.py @@ -0,0 +1,189 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Phase-5 Schicht-4 integration test (T11): the typed-bindings pipeline must +produce identical action-call parameters whether a workflow stores +``featureInstanceId`` as a legacy raw UUID or as a typed +``FeatureInstanceRef`` envelope. + +The pipeline under test:: + + saved graph + -> materializeFeatureInstanceRefs (Phase-5, this test) + -> materializeConnectionRefs (existing pick-not-push helper) + -> resolveParameterReferences (typed bindings + envelope unwrap) + -> action params (what the action implementation would receive) + +This is the integration counterpart to the focused unit tests in +``tests/unit/workflows/test_featureInstanceRefMigration.py``. + +Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``. +""" +from __future__ import annotations + +import copy +from typing import Any, Dict + +import pytest + +from modules.workflows.automation2.featureInstanceRefMigration import ( + materializeFeatureInstanceRefs, +) +from modules.workflows.automation2.graphUtils import resolveParameterReferences +from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs + + +_TRUSTEE_INSTANCE_UUID = "f1e2d3c4-b5a6-7890-1234-567890abcdef" + + +def _resolveActionParams(graph: Dict[str, Any], nodeId: str) -> Dict[str, Any]: + """Apply the full Schicht-4 pipeline and return the resolved action params + that ``ActionNodeExecutor`` would forward to ``ActionExecutor.executeAction``.""" + g = materializeFeatureInstanceRefs(graph) + g = materializeConnectionRefs(g) + targetNode = next(n for n in g["nodes"] if n["id"] == nodeId) + rawParams = dict(targetNode.get("parameters") or {}) + return resolveParameterReferences(rawParams, nodeOutputs={}) + + +def _legacyTrusteeGraph() -> Dict[str, Any]: + """Trustee Spesenbelege-shape graph with raw UUIDs (pre-migration).""" + return { + "nodes": [ + {"id": "n1", "type": "trigger.manual", "parameters": {}}, + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": { + "featureInstanceId": _TRUSTEE_INSTANCE_UUID, + "prompt": "extract expenses", + }, + }, + { + "id": "n6", + "type": "trustee.processDocuments", + "parameters": { + "featureInstanceId": _TRUSTEE_INSTANCE_UUID, + "documentList": { + "type": "ref", + "nodeId": "n5", + "path": ["documents"], + }, + }, + }, + { + "id": "n7", + "type": "trustee.syncToAccounting", + "parameters": { + "featureInstanceId": _TRUSTEE_INSTANCE_UUID, + "documentList": { + "type": "ref", + "nodeId": "n6", + "path": ["documents"], + }, + }, + }, + ], + "connections": [ + {"source": "n1", "target": "n5"}, + {"source": "n5", "target": "n6"}, + {"source": "n6", "target": "n7"}, + ], + } + + +def _migratedTrusteeGraph() -> Dict[str, Any]: + """The same graph but already in the migrated (typed envelope) shape.""" + g = _legacyTrusteeGraph() + envelope = { + "$type": "FeatureInstanceRef", + "id": _TRUSTEE_INSTANCE_UUID, + "featureCode": "trustee", + } + for node in g["nodes"]: + if node.get("type", "").startswith("trustee."): + node["parameters"]["featureInstanceId"] = copy.deepcopy(envelope) + return g + + +# --------------------------------------------------------------------------- +# Round-trip: legacy + migrated graphs produce identical action params +# --------------------------------------------------------------------------- + + +class TestTrusteeBindingsPipeline: + @pytest.mark.parametrize("nodeId", ["n5", "n6", "n7"]) + def test_legacyAndMigratedGraphsResolveToSameFeatureInstanceId(self, nodeId): + legacyParams = _resolveActionParams(_legacyTrusteeGraph(), nodeId) + migratedParams = _resolveActionParams(_migratedTrusteeGraph(), nodeId) + assert legacyParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID + assert migratedParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID + assert legacyParams == migratedParams + + def test_legacyGraphIsConvertedToTypedEnvelopeInPlaceOfRawUuid(self): + legacy = _legacyTrusteeGraph() + migrated = materializeFeatureInstanceRefs(legacy) + for node in migrated["nodes"]: + if not node.get("type", "").startswith("trustee."): + continue + param = node["parameters"]["featureInstanceId"] + assert isinstance(param, dict), f"node {node['id']} not migrated" + assert param["$type"] == "FeatureInstanceRef" + assert param["id"] == _TRUSTEE_INSTANCE_UUID + assert param["featureCode"] == "trustee" + + def test_migrationIsIdempotentAcrossPipeline(self): + once = materializeFeatureInstanceRefs(_legacyTrusteeGraph()) + twice = materializeFeatureInstanceRefs(once) + assert once == twice + + def test_otherParamsArePreservedAcrossMigration(self): + legacy = _legacyTrusteeGraph() + migrated = materializeFeatureInstanceRefs(legacy) + n5 = next(n for n in migrated["nodes"] if n["id"] == "n5") + assert n5["parameters"]["prompt"] == "extract expenses" + n6 = next(n for n in migrated["nodes"] if n["id"] == "n6") + # documentList DataRef must survive untouched (only the + # featureInstanceId key is rewritten). + assert n6["parameters"]["documentList"] == { + "type": "ref", + "nodeId": "n5", + "path": ["documents"], + } + + +# --------------------------------------------------------------------------- +# Cross-feature: same migration handles redmine / clickup / sharepoint +# --------------------------------------------------------------------------- + + +class TestCrossFeatureMigration: + @pytest.mark.parametrize( + "nodeType,expectedCode", + [ + ("redmine.createIssue", "redmine"), + ("clickup.createTask", "clickup"), + ("sharepoint.listFiles", "sharepoint"), + ], + ) + def test_nonTrusteeNodesAreMigratedWithCorrectFeatureCode( + self, nodeType, expectedCode + ): + graph = { + "nodes": [ + { + "id": "n", + "type": nodeType, + "parameters": {"featureInstanceId": "uuid-x"}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + env = out["nodes"][0]["parameters"]["featureInstanceId"] + assert env == { + "$type": "FeatureInstanceRef", + "id": "uuid-x", + "featureCode": expectedCode, + } + # And the resolver still hands back the raw UUID for legacy actions. + resolved = resolveParameterReferences(env, nodeOutputs={}) + assert resolved == "uuid-x" diff --git a/tests/integration/trustee/__init__.py b/tests/integration/trustee/__init__.py new file mode 100644 index 00000000..d02d6efc --- /dev/null +++ b/tests/integration/trustee/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +# +# Trustee feature integration tests. diff --git a/tests/integration/trustee/test_spesenbelege_workflow_e2e.py b/tests/integration/trustee/test_spesenbelege_workflow_e2e.py new file mode 100644 index 00000000..a1143063 --- /dev/null +++ b/tests/integration/trustee/test_spesenbelege_workflow_e2e.py @@ -0,0 +1,474 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Plan #2 Track A2 (T4): Trustee Spesenbelege Live-E2E Integration-Test. + +Runs the canonical Trustee Spesenbelege chain end-to-end through +``executeGraph``:: + + trigger.manual + -> trustee.processDocuments (real action) + -> trustee.syncToAccounting (real action) + +with: + +* an in-memory **TrusteeInterface** fake (records createDocument / + createPosition / updatePosition calls and assigns deterministic IDs), +* an in-memory **AccountingBridge** fake (records pushBatchToAccounting + calls and returns one success result per positionId), +* a literal upstream ``documentList`` (no AI / SharePoint involved — the + extraction step is replaced by a canned ActionDocument list so this + test focuses on the bindings + action layer, exactly as the Track A2 + plan requires: "Mock SharePoint + AI + Trustee-DB, echtes + processDocuments + syncToAccounting"). + +The test exercises the **Schicht-4 typed bindings pipeline** end-to-end: + +* ``featureInstanceId`` is provided as a typed ``FeatureInstanceRef`` + envelope on the producer node and as a raw legacy UUID on the consumer + node — both must reach the action layer as the bare UUID string after + ``materializeFeatureInstanceRefs`` + ``resolveParameterReferences``. +* ``documentList`` on ``trustee.syncToAccounting`` is a ``DataRef`` on + ``processDocuments[documents]`` (Pick-not-Push) — must resolve to the + ActionDocument list produced by ``processDocuments``. + +Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (A2 / T4). +""" +from __future__ import annotations + +import copy +import json +import uuid +from typing import Any, Dict, List, Optional + +import pytest + +from modules.workflows.automation2.executionEngine import executeGraph +from modules.workflows.automation2.runEnvelope import default_run_envelope + + +_TRUSTEE_INSTANCE_UUID = "11111111-2222-3333-4444-555555555555" +_MANDATE_ID = "mandate-zh-001" + + +# --------------------------------------------------------------------------- +# In-memory fakes for the Trustee feature +# --------------------------------------------------------------------------- + + +class _FakeTrusteeDocument: + """Minimal stand-in for ``TrusteeDocument`` with attribute access.""" + + def __init__(self, payload: Dict[str, Any]): + self.id = str(uuid.uuid4()) + for k, v in payload.items(): + setattr(self, k, v) + + def model_dump(self) -> Dict[str, Any]: + return {k: v for k, v in self.__dict__.items()} + + +class _FakeTrusteePosition: + """Minimal stand-in for ``TrusteePosition`` with attribute access.""" + + def __init__(self, payload: Dict[str, Any]): + self.id = str(uuid.uuid4()) + for k, v in payload.items(): + setattr(self, k, v) + + def model_dump(self) -> Dict[str, Any]: + return {k: v for k, v in self.__dict__.items()} + + +class _FakeTrusteeDb: + """Captures ``getRecordset`` calls so processDocuments' bank-match + auto-linking path can be exercised without a real DB.""" + + def __init__(self, positions: List[_FakeTrusteePosition]): + self._positions = positions + self.calls: List[Dict[str, Any]] = [] + + def getRecordset(self, model, recordFilter=None): + self.calls.append({"model": getattr(model, "__name__", str(model)), + "filter": recordFilter}) + return list(self._positions) + + +class _FakeTrusteeInterface: + """In-memory replacement for the live trustee interface.""" + + def __init__(self, mandateId: str, featureInstanceId: str): + self.mandateId = mandateId + self.featureInstanceId = featureInstanceId + self.documents: List[_FakeTrusteeDocument] = [] + self.positions: List[_FakeTrusteePosition] = [] + self.updates: List[Dict[str, Any]] = [] + self.db = _FakeTrusteeDb(self.positions) + + def createDocument(self, payload: Dict[str, Any]) -> _FakeTrusteeDocument: + doc = _FakeTrusteeDocument({ + "fileId": payload.get("fileId"), + "documentName": payload.get("documentName"), + "documentMimeType": payload.get("documentMimeType"), + "sourceType": payload.get("sourceType"), + "documentType": payload.get("documentType"), + "mandateId": self.mandateId, + "featureInstanceId": self.featureInstanceId, + }) + self.documents.append(doc) + return doc + + def createPosition(self, payload: Dict[str, Any]) -> _FakeTrusteePosition: + pos = _FakeTrusteePosition({**payload}) + self.positions.append(pos) + return pos + + def updatePosition(self, positionId: str, patch: Dict[str, Any]) -> Optional[_FakeTrusteePosition]: + self.updates.append({"id": positionId, "patch": dict(patch)}) + for pos in self.positions: + if getattr(pos, "id", None) == positionId: + for k, v in patch.items(): + setattr(pos, k, v) + return pos + return None + + +class _FakeAccountingResult: + def __init__(self, success: bool = True, errorMessage: Optional[str] = None): + self.success = success + self.errorMessage = errorMessage + + +class _FakeAccountingBridge: + """Records pushBatchToAccounting invocations and returns one success + per positionId.""" + + pushBatchCalls: List[Dict[str, Any]] = [] + + def __init__(self, trusteeInterface): + self.trusteeInterface = trusteeInterface + + async def pushBatchToAccounting(self, featureInstanceId: str, + positionIds: List[str]): + type(self).pushBatchCalls.append({ + "featureInstanceId": featureInstanceId, + "positionIds": list(positionIds), + }) + return [_FakeAccountingResult(success=True) for _ in positionIds] + + +# --------------------------------------------------------------------------- +# Test fixtures: mock services + module-level patches +# --------------------------------------------------------------------------- + + +@pytest.fixture +def trusteeInterface(): + return _FakeTrusteeInterface(_MANDATE_ID, _TRUSTEE_INSTANCE_UUID) + + +@pytest.fixture(autouse=True) +def resetAccountingBridgeCalls(): + _FakeAccountingBridge.pushBatchCalls = [] + yield + _FakeAccountingBridge.pushBatchCalls = [] + + +@pytest.fixture +def patchTrustee(monkeypatch, trusteeInterface): + """Patches ``getInterface`` + ``AccountingBridge`` in both action + modules so the real action code runs against the in-memory fakes.""" + from modules.workflows.methods.methodTrustee.actions import ( + processDocuments as _procMod, + syncToAccounting as _syncMod, + ) + from modules.features.trustee import ( + interfaceFeatureTrustee as _ifaceMod, + ) + from modules.features.trustee.accounting import accountingBridge as _bridgeMod + + def _fakeGetInterface(*_args, **_kwargs): + return trusteeInterface + + monkeypatch.setattr(_ifaceMod, "getInterface", _fakeGetInterface, raising=True) + monkeypatch.setattr(_bridgeMod, "AccountingBridge", _FakeAccountingBridge, raising=True) + return trusteeInterface + + +def _services(): + """Minimal services container for executeGraph. + + The ``ActionExecutor`` only needs ``services`` to be passed through to + the trustee actions. The trustee actions only touch + ``services.mandateId`` and ``services.featureInstanceId`` directly + (everything else is provided via ``parameters``); ``services.chat`` is + looked up but only used as a fallback that we do not exercise here. + """ + class _S: + mandateId = _MANDATE_ID + featureInstanceId = _TRUSTEE_INSTANCE_UUID + user = None + chat = None + return _S() + + +# --------------------------------------------------------------------------- +# Canned upstream extraction result +# --------------------------------------------------------------------------- + + +def _expenseReceiptExtraction() -> Dict[str, Any]: + return { + "documentType": "EXPENSE_RECEIPT", + "fileId": "file-001", + "fileName": "tankbeleg.pdf", + "extractedData": [ + { + "documentType": "expense_receipt", + "valuta": "2026-04-12", + "transactionDateTime": 1744675200, + "company": "Migrolino Tankstelle Zürich AG", + "desc": "Tankfüllung Bleifrei 95, 42.30 L à 1.799 CHF/L", + "bookingCurrency": "CHF", + "bookingAmount": "76.10", + "originalCurrency": "CHF", + "originalAmount": "76.10", + "vatPercentage": "8.1", + "vatAmount": "5.71", + "debitAccountNumber": "6200 Fahrzeugaufwand", + "creditAccountNumber": "1020 Bank", + "tags": ["fuel", "vehicle"], + "bookingReference": "RB-2026-04-12-001", + } + ], + } + + +def _bankDocumentExtraction() -> Dict[str, Any]: + return { + "documentType": "BANK_DOCUMENT", + "fileId": "file-002", + "fileName": "kontoauszug_april.pdf", + "extractedData": [ + { + "documentType": "bank_document", + "valuta": "2026-04-13", + "company": "Migrolino Tankstelle Zürich AG", + "desc": "Lastschrift Tankfüllung 12.04.2026, Ref RB-2026-04-12-001", + "bookingCurrency": "CHF", + "bookingAmount": "-76.10", + "creditAccountNumber": "1020 Bank", + "bookingReference": "RB-2026-04-12-001", + } + ], + } + + +def _cannedExtractionDocuments() -> List[Dict[str, Any]]: + """Two ActionDocument-shaped dicts: one expense receipt + one bank + document. processDocuments' ``_resolveDocumentList`` accepts this + shape directly when ``documentName`` / ``documentData`` are present.""" + return [ + { + "documentName": "tankbeleg.json", + "documentData": json.dumps(_expenseReceiptExtraction()), + "mimeType": "application/json", + }, + { + "documentName": "kontoauszug_april.json", + "documentData": json.dumps(_bankDocumentExtraction()), + "mimeType": "application/json", + }, + ] + + +# --------------------------------------------------------------------------- +# Graph builder +# --------------------------------------------------------------------------- + + +def _buildGraph(featureInstanceIdOnProcess, featureInstanceIdOnSync) -> Dict[str, Any]: + """Trustee Spesenbelege chain. + + The ``trigger.manual`` node emits an ``ActionResult`` port, which is + not assignable into ``trustee.processDocuments[in:0]`` (accepts only + ``DocumentList`` / ``Transit``). Production graphs solve this by + going through ``trustee.extractFromFiles`` (DocumentList output) + first; this test bypasses that step (we ship a literal canned + extraction list instead of running AI/SharePoint), so we simply + leave ``trigger.manual`` orphaned and start the data plane at + ``process``.""" + return { + "nodes": [ + {"id": "trigger", "type": "trigger.manual", "parameters": {}}, + { + "id": "process", + "type": "trustee.processDocuments", + "parameters": { + "featureInstanceId": featureInstanceIdOnProcess, + "documentList": _cannedExtractionDocuments(), + }, + }, + { + "id": "sync", + "type": "trustee.syncToAccounting", + "parameters": { + "featureInstanceId": featureInstanceIdOnSync, + "documentList": { + "type": "ref", + "nodeId": "process", + "path": ["documents"], + }, + }, + }, + ], + "connections": [ + {"source": "process", "target": "sync"}, + ], + } + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestSpesenbelegeEndToEnd: + """End-to-end Trustee Spesenbelege graph through executeGraph.""" + + @pytest.mark.asyncio + async def test_processAndSyncWritesDocumentsPositionsAndAccountingPush( + self, patchTrustee + ): + """Happy-path: 1 expense receipt + 1 bank document. + + Asserts at all three layers: bindings, action results, and side + effects on the (faked) trustee + accounting infrastructure.""" + trustee = patchTrustee + envelope = { + "$type": "FeatureInstanceRef", + "id": _TRUSTEE_INSTANCE_UUID, + "featureCode": "trustee", + } + graph = _buildGraph( + featureInstanceIdOnProcess=copy.deepcopy(envelope), + featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID, + ) + runEnvelope = default_run_envelope("manual", payload={}) + + result = await executeGraph( + graph, + services=_services(), + run_envelope=runEnvelope, + userId="test-user", + mandateId=_MANDATE_ID, + instanceId=_TRUSTEE_INSTANCE_UUID, + ) + + assert result.get("success") is True, result + + # --- Layer 1: bindings — both nodes must see the unwrapped UUID --- + assert len(trustee.documents) == 2 + for doc in trustee.documents: + assert doc.featureInstanceId == _TRUSTEE_INSTANCE_UUID + + # --- Layer 2: action results ----------------------------------- + nodeOutputs = result["nodeOutputs"] + processOut = nodeOutputs["process"] + assert processOut.get("success") is True + assert processOut.get("error") in (None, "", False) + assert isinstance(processOut.get("documents"), list) + assert len(processOut["documents"]) == 1 + processedDoc = processOut["documents"][0] + assert processedDoc.get("documentName") == "process_documents_result.json" + payload = json.loads(processedDoc["documentData"]) + assert len(payload["documentIds"]) == 2 + assert len(payload["positionIds"]) == 2 + # Bank document auto-link found the matching expense (same + # bookingReference RB-2026-04-12-001), so exactly one position + # was matched. + assert len(payload["autoMatchedPositionIds"]) == 1 + + syncOut = nodeOutputs["sync"] + assert syncOut.get("success") is True + assert syncOut.get("error") in (None, "", False) + syncDoc = syncOut["documents"][0] + syncSummary = json.loads(syncDoc["documentData"]) + assert syncSummary["pushed"] == 2 + assert syncSummary["total"] == 2 + assert all(r["success"] is True for r in syncSummary["results"]) + + # --- Layer 3: side effects ------------------------------------- + assert len(trustee.positions) == 2 + # Bank document update propagated through updatePosition + assert len(trustee.updates) == 1 + assert "bankDocumentId" in trustee.updates[0]["patch"] + + # Accounting bridge was called once with the resolved positionIds + # and the unwrapped UUID, NOT the typed envelope. + assert len(_FakeAccountingBridge.pushBatchCalls) == 1 + call = _FakeAccountingBridge.pushBatchCalls[0] + assert call["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID + assert sorted(call["positionIds"]) == sorted(payload["positionIds"]) + + @pytest.mark.asyncio + async def test_legacyRawUuidFeatureInstanceIdAlsoWorks(self, patchTrustee): + """A pre-Schicht-4 graph storing ``featureInstanceId`` as a raw + UUID must produce the same end-to-end behaviour after the + runtime ``materializeFeatureInstanceRefs`` migration.""" + trustee = patchTrustee + graph = _buildGraph( + featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID, + featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID, + ) + result = await executeGraph( + graph, + services=_services(), + run_envelope=default_run_envelope("manual", payload={}), + userId="test-user", + mandateId=_MANDATE_ID, + instanceId=_TRUSTEE_INSTANCE_UUID, + ) + assert result.get("success") is True, result + assert len(trustee.documents) == 2 + assert len(trustee.positions) == 2 + assert _FakeAccountingBridge.pushBatchCalls[0]["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID + + @pytest.mark.asyncio + async def test_emptyExtractionListIsHandledGracefully(self, patchTrustee): + """When processDocuments receives no documents, syncToAccounting + must surface a graceful "No positionIds in document" message and + never call the accounting bridge.""" + trustee = patchTrustee + graph = _buildGraph( + featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID, + featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID, + ) + # Replace the canned documents with a no-records extraction. + emptyExtraction = { + "documentType": "EXPENSE_RECEIPT", + "fileId": "file-empty", + "fileName": "empty.json", + "extractedData": [], + } + graph["nodes"][1]["parameters"]["documentList"] = [{ + "documentName": "empty.json", + "documentData": json.dumps(emptyExtraction), + "mimeType": "application/json", + }] + result = await executeGraph( + graph, + services=_services(), + run_envelope=default_run_envelope("manual", payload={}), + userId="test-user", + mandateId=_MANDATE_ID, + instanceId=_TRUSTEE_INSTANCE_UUID, + ) + assert result.get("success") is True, result + assert len(trustee.documents) == 0 + assert len(trustee.positions) == 0 + syncSummary = json.loads( + result["nodeOutputs"]["sync"]["documents"][0]["documentData"] + ) + assert syncSummary["pushed"] == 0 + assert _FakeAccountingBridge.pushBatchCalls == [] diff --git a/tests/unit/graphicalEditor/test_action_node_connection_provenance.py b/tests/unit/graphicalEditor/test_action_node_connection_provenance.py new file mode 100644 index 00000000..b04dd594 --- /dev/null +++ b/tests/unit/graphicalEditor/test_action_node_connection_provenance.py @@ -0,0 +1,9 @@ +# Copyright (c) 2025 Patrick Motsch +from modules.workflows.automation2.executors.actionNodeExecutor import _buildConnectionRefDict + + +def test_build_connection_ref_dict_from_logical_string(): + d = _buildConnectionRefDict("connection:msft:user@example.com", None, None) + assert d is not None + assert d["authority"] == "msft" + assert d["label"] == "connection:msft:user@example.com" diff --git a/tests/unit/graphicalEditor/test_adapter_validator.py b/tests/unit/graphicalEditor/test_adapter_validator.py new file mode 100644 index 00000000..5f8091fd --- /dev/null +++ b/tests/unit/graphicalEditor/test_adapter_validator.py @@ -0,0 +1,352 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Tests for the Schicht-3 Adapter Validator (Phase 3). + +Validates the 5 drift rules between Editor-Node Adapters and the +Schicht-2 Actions they bind to: + + Rule 1: every userParams.actionArg exists in the Action + Rule 2: every required Action arg is covered (userParams or contextParams) + Rule 3: every Action parameter type exists in PORT_TYPE_CATALOG + Rule 4: Action outputType exists in PORT_TYPE_CATALOG + Rule 5: every Action with dynamicMode=False has an Editor adapter + +Plus a healthy-state test that runs the validator against the live +STATIC_NODE_TYPES + every shipping Method instance, and asserts no drift. +""" +from __future__ import annotations + +import importlib +import sys +import types + +import pytest + +from modules.datamodels.datamodelWorkflowActions import ( + WorkflowActionDefinition, + WorkflowActionParameter, +) +from modules.features.graphicalEditor.adapterValidator import ( + AdapterValidationReport, + _buildActionsRegistryFromMethods, + _formatAdapterReport, + _validateAdapterAgainstAction, + _validateAllAdapters, +) +from modules.features.graphicalEditor.nodeAdapter import ( + NodeAdapter, + UserParamMapping, +) +from modules.shared.frontendTypes import FrontendType + + +# --------------------------------------------------------------------------- +# Test factories +# --------------------------------------------------------------------------- + +def _makeParam(typeStr: str, *, required: bool = False, **kwargs) -> WorkflowActionParameter: + defaults = { + "name": "p", + "type": typeStr, + "frontendType": FrontendType.TEXT, + "required": required, + "description": "", + } + defaults.update(kwargs) + return WorkflowActionParameter(**defaults) + + +def _makeAction( + actionId: str = "trustee.processDocuments", + parameters: dict | None = None, + outputType: str = "TrusteeProcessResult", + dynamicMode: bool = False, +) -> WorkflowActionDefinition: + return WorkflowActionDefinition( + actionId=actionId, + description="t", + parameters=parameters or {}, + outputType=outputType, + dynamicMode=dynamicMode, + execute=lambda *a, **k: None, + ) + + +def _makeAdapter( + *, + userArgs: list[str] | None = None, + contextArgs: list[str] | None = None, +) -> NodeAdapter: + return NodeAdapter( + nodeId="trustee.processDocuments", + bindsAction="trustee.processDocuments", + category="trustee", + label="Verarbeiten", + description="...", + userParams=[UserParamMapping(actionArg=a) for a in (userArgs or [])], + contextParams={k: f"$session.{k}" for k in (contextArgs or [])}, + ) + + +# --------------------------------------------------------------------------- +# Per-rule unit tests +# --------------------------------------------------------------------------- + +class TestRule1_UserParamArgExistsInAction: + def test_okWhenAllArgsExist(self): + action = _makeAction(parameters={ + "documentList": _makeParam("DocumentList", required=True), + "featureInstanceId": _makeParam("FeatureInstanceRef", required=True), + }) + adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId"]) + report = _validateAdapterAgainstAction(adapter, action) + assert report.isHealthy, report.errors + + def test_failsWhenAdapterReferencesUnknownArg(self): + action = _makeAction(parameters={"documentList": _makeParam("DocumentList", required=True), + "featureInstanceId": _makeParam("FeatureInstanceRef", required=True)}) + adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId", "ghostArg"]) + report = _validateAdapterAgainstAction(adapter, action) + assert any("ghostArg" in e for e in report.errors) + + +class TestRule2_RequiredArgsCovered: + def test_failsWhenRequiredArgMissing(self): + action = _makeAction(parameters={ + "documentList": _makeParam("DocumentList", required=True), + "featureInstanceId": _makeParam("FeatureInstanceRef", required=True), + }) + adapter = _makeAdapter(userArgs=["documentList"]) # missing featureInstanceId + report = _validateAdapterAgainstAction(adapter, action) + assert any("featureInstanceId" in e for e in report.errors) + + def test_okWhenRequiredArgInContext(self): + action = _makeAction(parameters={ + "documentList": _makeParam("DocumentList", required=True), + "mandateId": _makeParam("str", required=True), + }) + adapter = _makeAdapter(userArgs=["documentList"], contextArgs=["mandateId"]) + report = _validateAdapterAgainstAction(adapter, action) + assert report.isHealthy, report.errors + + def test_optionalArgMayBeUnset(self): + action = _makeAction(parameters={ + "documentList": _makeParam("DocumentList", required=True), + "prompt": _makeParam("str", required=False), + }) + adapter = _makeAdapter(userArgs=["documentList"]) + report = _validateAdapterAgainstAction(adapter, action) + assert report.isHealthy, report.errors + + +class TestRule3_ActionParamTypesInCatalog: + def test_failsForUnknownType(self): + action = _makeAction(parameters={"documentList": _makeParam("Foobar", required=True)}) + adapter = _makeAdapter(userArgs=["documentList"]) + report = _validateAdapterAgainstAction(adapter, action) + assert any("Foobar" in e for e in report.errors) + + +class TestRule4_OutputTypeInCatalog: + def test_failsForUnknownOutputType(self): + action = _makeAction(outputType="Nonsense") + adapter = _makeAdapter() + report = _validateAdapterAgainstAction(adapter, action) + assert any("Nonsense" in e for e in report.errors) + + def test_okForActionResult(self): + action = _makeAction(outputType="ActionResult") + adapter = _makeAdapter() + report = _validateAdapterAgainstAction(adapter, action) + assert report.isHealthy, report.errors + + +class TestRule5_OrphanActionsAcrossRegistry: + def test_warnsForActionWithoutAdapter(self): + action = _makeAction(actionId="trustee.queryData") + registry = {"trustee": {"queryData": action}} + report = _validateAllAdapters([], registry) + assert any("trustee.queryData" in w for w in report.warnings) + + def test_dynamicModeActionDoesNotWarn(self): + action = _makeAction(actionId="trustee.queryData", dynamicMode=True) + registry = {"trustee": {"queryData": action}} + report = _validateAllAdapters([], registry) + assert report.warnings == [] + + +# --------------------------------------------------------------------------- +# Aggregator + report formatter +# --------------------------------------------------------------------------- + +class TestValidateAllAdapters: + def test_passesWithFullCoverage(self): + node = { + "id": "trustee.processDocuments", + "category": "trustee", + "label": "X", "description": "Y", + "parameters": [ + {"name": "documentList", "type": "DocumentList"}, + {"name": "featureInstanceId", "type": "FeatureInstanceRef"}, + ], + "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList"]}}, + "_method": "trustee", "_action": "processDocuments", + } + action = _makeAction(parameters={ + "documentList": _makeParam("DocumentList", required=True), + "featureInstanceId": _makeParam("FeatureInstanceRef", required=True), + }) + registry = {"trustee": {"processDocuments": action}} + report = _validateAllAdapters([node], registry) + assert report.isHealthy, report.errors + + def test_reportsMissingAction(self): + node = { + "id": "trustee.processDocuments", + "_method": "trustee", "_action": "ghostAction", + "parameters": [], "inputs": 0, + } + report = _validateAllAdapters([node], {"trustee": {}}) + assert any("ghostAction" in e for e in report.errors) + + +class TestFormatReport: + def test_healthy(self): + out = _formatAdapterReport(AdapterValidationReport()) + assert "healthy" in out.lower() + + def test_withErrorsAndWarnings(self): + rep = AdapterValidationReport(errors=["e1"], warnings=["w1"]) + out = _formatAdapterReport(rep) + assert "ERROR" in out and "WARN" in out + + +# --------------------------------------------------------------------------- +# Healthy-state: live methods + STATIC_NODE_TYPES +# --------------------------------------------------------------------------- + +class _NullRbac: + def getUserPermissions(self, **kwargs): + class _P: + view = read = create = update = delete = True + return _P() + + +class _StubServices: + def __init__(self): + self.rbac = _NullRbac() + self.user = type("U", (), {"id": "test-user", "roleLabels": []})() + self.mandateId = None + self.featureInstanceId = None + + +def _ensureOptionalDeps(): + class _AnyAttrModule(types.ModuleType): + def __getattr__(self, name): + return type(name, (), {}) + + for name in ("aiohttp",): + if name not in sys.modules: + sys.modules[name] = _AnyAttrModule(name) + + +_LIVE_METHODS = [ + ("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee", "trustee"), + ("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine", "redmine"), + ("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint", "sharepoint"), + ("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook", "outlook"), + ("modules.workflows.methods.methodAi.methodAi", "MethodAi", "ai"), + ("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup", "clickup"), + ("modules.workflows.methods.methodFile.methodFile", "MethodFile", "file"), + ("modules.workflows.methods.methodContext.methodContext", "MethodContext", "context"), +] + + +def _instantiateLiveMethods() -> dict: + """Best-effort instantiation of every shipping Method with stub services. + + Returns {shortName: instance}. Methods that can't be instantiated in the + test env (missing dependencies) are skipped silently — Phase 2 has its + own healthy-state test that catches per-method drift. + """ + _ensureOptionalDeps() + out: dict = {} + for modulePath, className, shortName in _LIVE_METHODS: + try: + module = importlib.import_module(modulePath) + cls = getattr(module, className, None) + if cls is None: + continue + instance = cls(_StubServices()) + out[shortName] = instance + except Exception: + continue + return out + + +# Snapshot of pre-Phase-3 drift discovered when the validator was first run +# against the live STATIC_NODE_TYPES + live Method registry. +# +# After Phase-4 Adapter-Drift-Cleanup (Plan #4) this set is intentionally +# empty: every Editor adapter must align cleanly with its Schicht-2 Action, +# and the regression net below now uses `assert report.errors == []`. +# +# History of removed drifts: +# wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md +# +# Rule: this set MUST stay empty. New drift => fix the adapter or the action, +# not the snapshot. +_KNOWN_ADAPTER_DRIFTS: frozenset[tuple[str, str]] = frozenset() + + +def _extractDriftKey(errorMessage: str) -> tuple[str, str] | None: + """Parse a validator error message into a (nodeId, fieldName) drift key. + + Recognises both rule-1 ("userParams.actionArg 'X' does not exist…") and + rule-2 ("required action arg 'X' is neither in userParams…") patterns. + """ + import re + m = re.search( + r"adapter '([^']+)' bindsAction '[^']+': userParams\.actionArg '([^']+)'", + errorMessage, + ) + if m: + return (m.group(1), m.group(2)) + m = re.search( + r"adapter '([^']+)' bindsAction '[^']+': required action arg '([^']+)'", + errorMessage, + ) + if m: + return (m.group(1), m.group(2)) + return None + + +def test_staticNodesHaveNoDriftAgainstLiveMethods(): + """Strict regression: every Editor adapter in STATIC_NODE_TYPES must align + with its Schicht-2 Action signature. + + Phase 3 shipped the validator with a tracked drift snapshot + (`_KNOWN_ADAPTER_DRIFTS`); Phase 4 cleaned the backlog so the snapshot is + empty and we now demand zero errors. Any new drift fails immediately — + fix the adapter or the action, never the assertion. + + History: wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md + """ + from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES + + instances = _instantiateLiveMethods() + if not instances: + pytest.skip("no methods could be instantiated in this test env") + + registry = _buildActionsRegistryFromMethods(instances) + report = _validateAllAdapters(list(STATIC_NODE_TYPES), registry) + + assert _KNOWN_ADAPTER_DRIFTS == frozenset(), ( + "_KNOWN_ADAPTER_DRIFTS must stay empty after Phase-4 cleanup. " + "Do not add new entries — fix the drift instead." + ) + assert report.errors == [], ( + "Adapter↔Action drift detected:\n" + "\n".join(report.errors) + ) diff --git a/tests/unit/graphicalEditor/test_node_adapter.py b/tests/unit/graphicalEditor/test_node_adapter.py new file mode 100644 index 00000000..7b24b01a --- /dev/null +++ b/tests/unit/graphicalEditor/test_node_adapter.py @@ -0,0 +1,170 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Tests for the Schicht-3 NodeAdapter projection (Phase 3). + +Covers the pure projection helpers in nodeAdapter.py: + - identifying method-bound vs framework-primitive nodes + - extracting bindsAction + - building UserParamMapping from legacy parameter dicts + - converting inputPorts dict-of-dicts into per-port accepts lists + - end-to-end legacy-node → NodeAdapter projection + +These tests do NOT touch live methods; they verify the projection logic +in isolation so it is robust before the adapterValidator composes with it. +""" +from __future__ import annotations + +import pytest + +from modules.features.graphicalEditor.nodeAdapter import ( + NodeAdapter, + UserParamMapping, + _adapterFromLegacyNode, + _bindsActionFromLegacy, + _extractVisibleWhen, + _isMethodBoundNode, + _projectAllAdapters, + _projectInputAccepts, + _userParamFromLegacyParam, +) + + +def _legacyMethodNode(**overrides): + base = { + "id": "trustee.processDocuments", + "category": "trustee", + "label": "Verarbeiten", + "description": "...", + "parameters": [ + {"name": "documentList", "type": "DocumentList", "required": True, + "frontendType": "dataRef", "description": "Eingabe"}, + {"name": "featureInstanceId", "type": "FeatureInstanceRef", "required": True, + "frontendType": "hidden", "description": "Trustee-Instanz"}, + ], + "inputs": 1, + "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "TrusteeProcessResult"}}, + "meta": {"icon": "mdi-x", "color": "#000", "usesAi": False}, + "_method": "trustee", + "_action": "processDocuments", + } + base.update(overrides) + return base + + +def _primitiveNode(**overrides): + base = { + "id": "flow.loop", + "category": "flow", + "label": "Schleife", + "parameters": [{"name": "items", "type": "string", "required": True}], + "inputs": 1, + "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "executor": "flow", + } + base.update(overrides) + return base + + +class TestIsMethodBound: + def test_methodBoundIsTrue(self): + assert _isMethodBoundNode(_legacyMethodNode()) is True + + def test_primitiveIsFalse(self): + assert _isMethodBoundNode(_primitiveNode()) is False + + @pytest.mark.parametrize("partial", [ + {"_method": "trustee"}, # missing _action + {"_action": "processDocuments"}, # missing _method + {}, + ]) + def test_partialBindingIsFalse(self, partial): + node = _primitiveNode(**partial) + assert _isMethodBoundNode(node) is False + + +class TestBindsActionFromLegacy: + def test_returnsCanonicalFqn(self): + assert _bindsActionFromLegacy(_legacyMethodNode()) == "trustee.processDocuments" + + def test_returnsNoneForPrimitive(self): + assert _bindsActionFromLegacy(_primitiveNode()) is None + + +class TestUserParamFromLegacy: + def test_carriesEditorOverridesOnly(self): + legacy = {"name": "documentList", "type": "DocumentList", "required": True, + "frontendType": "dataRef", "description": "Eingabe", "default": []} + mapping = _userParamFromLegacyParam(legacy) + assert isinstance(mapping, UserParamMapping) + assert mapping.actionArg == "documentList" + assert mapping.uiHint == "dataRef" + assert mapping.description == "Eingabe" + assert mapping.defaultValue == [] + assert mapping.frontendOptions is None + + def test_extractsConditionalVisibility(self): + legacy = { + "name": "filterJson", + "type": "string", + "frontendType": "textarea", + "frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]}, + } + mapping = _userParamFromLegacyParam(legacy) + assert mapping.visibleWhen == {"actionArg": "mode", "in": ["raw", "aggregate"]} + + +class TestExtractVisibleWhen: + def test_returnsNoneForMissingHint(self): + assert _extractVisibleWhen(None) is None + assert _extractVisibleWhen({}) is None + assert _extractVisibleWhen({"dependsOn": "x"}) is None + + def test_normalizesScalarShowWhen(self): + out = _extractVisibleWhen({"dependsOn": "entity", "showWhen": "tenant"}) + assert out == {"actionArg": "entity", "in": ["tenant"]} + + +class TestProjectInputAccepts: + def test_perPortAcceptsList(self): + node = _legacyMethodNode() + assert _projectInputAccepts(node) == [["DocumentList", "Transit"]] + + def test_emptyForZeroInputs(self): + node = _legacyMethodNode(inputs=0, inputPorts={}) + assert _projectInputAccepts(node) == [] + + def test_handlesStringKeys(self): + node = _legacyMethodNode(inputPorts={"0": {"accepts": ["Transit"]}}) + assert _projectInputAccepts(node) == [["Transit"]] + + def test_missingPortReturnsEmptyList(self): + node = _legacyMethodNode(inputs=2, inputPorts={0: {"accepts": ["Transit"]}}) + assert _projectInputAccepts(node) == [["Transit"], []] + + +class TestAdapterFromLegacyNode: + def test_buildsAdapter(self): + adapter = _adapterFromLegacyNode(_legacyMethodNode()) + assert isinstance(adapter, NodeAdapter) + assert adapter.nodeId == "trustee.processDocuments" + assert adapter.bindsAction == "trustee.processDocuments" + assert adapter.category == "trustee" + assert len(adapter.userParams) == 2 + assert adapter.userParams[0].actionArg == "documentList" + assert adapter.inputAccepts == [["DocumentList", "Transit"]] + assert adapter.contextParams == {} + assert adapter.meta.get("icon") == "mdi-x" + + def test_returnsNoneForPrimitive(self): + assert _adapterFromLegacyNode(_primitiveNode()) is None + + +class TestProjectAllAdapters: + def test_skipsPrimitives(self): + nodes = [_legacyMethodNode(), _primitiveNode()] + out = _projectAllAdapters(nodes) + assert list(out.keys()) == ["trustee.processDocuments"] diff --git a/tests/unit/graphicalEditor/test_portTypes_catalog.py b/tests/unit/graphicalEditor/test_portTypes_catalog.py new file mode 100644 index 00000000..11967376 --- /dev/null +++ b/tests/unit/graphicalEditor/test_portTypes_catalog.py @@ -0,0 +1,257 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Catalog integrity + new Phase-1 schemas +(see wiki/c-work/1-plan/2026-04-typed-action-architecture.md). +""" + +import pytest + +from modules.features.graphicalEditor.portTypes import ( + PORT_TYPE_CATALOG, + PRIMITIVE_TYPES, + PortField, + PortSchema, + _stripContainer, + _validateCatalog, +) + + +# --------------------------------------------------------------------------- +# Validator behaviour +# --------------------------------------------------------------------------- + +def test_catalogIsHealthy(): + """The shipped catalog must validate without errors.""" + errors = _validateCatalog() + assert errors == [], "Catalog has integrity errors:\n - " + "\n - ".join(errors) + + +def test_validatorDetectsUnknownType(monkeypatch): + """Inject a bad schema and ensure it is reported.""" + bad = PortSchema(name="_BadTest", fields=[ + PortField(name="x", type="DoesNotExist"), + ]) + monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadTest", bad) + errors = _validateCatalog() + assert any("DoesNotExist" in e for e in errors) + + +def test_validatorDetectsBadDiscriminatorType(monkeypatch): + bad = PortSchema(name="_BadDisc", fields=[ + PortField(name="x", type="int", discriminator=True), + ]) + monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadDisc", bad) + errors = _validateCatalog() + assert any("discriminator must be 'str'" in e for e in errors) + + +def test_validatorDetectsMultipleDiscriminators(monkeypatch): + bad = PortSchema(name="_DoubleDisc", fields=[ + PortField(name="a", type="str", discriminator=True), + PortField(name="b", type="str", discriminator=True), + ]) + monkeypatch.setitem(PORT_TYPE_CATALOG, "_DoubleDisc", bad) + errors = _validateCatalog() + assert any("max 1 allowed" in e for e in errors) + + +def test_validatorDetectsKeyNameMismatch(monkeypatch): + bad = PortSchema(name="DifferentName", fields=[ + PortField(name="x", type="str"), + ]) + monkeypatch.setitem(PORT_TYPE_CATALOG, "_KeyMismatch", bad) + errors = _validateCatalog() + assert any("does not match schema.name" in e for e in errors) + + +# --------------------------------------------------------------------------- +# _stripContainer helper +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("typeStr,expected", [ + ("str", ["str"]), + ("int", ["int"]), + ("ConnectionRef", ["ConnectionRef"]), + ("List[Document]", ["Document"]), + ("List[ProcessError]", ["ProcessError"]), + ("Dict[str,Any]", ["str", "Any"]), + ("Dict[str,int]", ["str", "int"]), + ("", []), +]) +def test_stripContainer(typeStr, expected): + assert _stripContainer(typeStr) == expected + + +# --------------------------------------------------------------------------- +# Phase-1 new Refs +# --------------------------------------------------------------------------- + +def test_featureInstanceRefHasDiscriminator(): + s = PORT_TYPE_CATALOG["FeatureInstanceRef"] + names = {f.name for f in s.fields} + assert names == {"id", "featureCode", "label", "mandateId"} + discriminators = [f for f in s.fields if f.discriminator] + assert len(discriminators) == 1 + assert discriminators[0].name == "featureCode" + assert discriminators[0].type == "str" + + +def test_connectionRefAuthorityIsDiscriminator(): + s = PORT_TYPE_CATALOG["ConnectionRef"] + discriminators = [f for f in s.fields if f.discriminator] + assert len(discriminators) == 1 + assert discriminators[0].name == "authority" + + +def test_clickUpListRefExists(): + s = PORT_TYPE_CATALOG["ClickUpListRef"] + names = {f.name for f in s.fields} + assert "listId" in names + assert "connection" in names + + +def test_promptTemplateRefExists(): + s = PORT_TYPE_CATALOG["PromptTemplateRef"] + names = {f.name for f in s.fields} + assert "id" in names + + +# --------------------------------------------------------------------------- +# Phase-1 Trustee Result Schemas +# --------------------------------------------------------------------------- + +def test_trusteeRefreshResultStructure(): + s = PORT_TYPE_CATALOG["TrusteeRefreshResult"] + names = {f.name for f in s.fields} + assert {"syncCounts", "oldestBookingDate", "newestBookingDate", + "featureInstance", "errors"}.issubset(names) + + +def test_trusteeProcessResultExposesDocuments(): + s = PORT_TYPE_CATALOG["TrusteeProcessResult"] + docField = next((f for f in s.fields if f.name == "documents"), None) + assert docField is not None + assert docField.type == "List[Document]" + assert docField.required is True + + +def test_trusteeSyncResultHasJournalLines(): + s = PORT_TYPE_CATALOG["TrusteeSyncResult"] + names = {f.name for f in s.fields} + assert "syncedCount" in names + assert "journalLines" in names + + +def test_journalLineHasAccountingFields(): + s = PORT_TYPE_CATALOG["JournalLine"] + names = {f.name for f in s.fields} + for required in ("bookingDate", "account", "amount"): + assert required in names + + +def test_processErrorHasStageAndMessage(): + s = PORT_TYPE_CATALOG["ProcessError"] + names = {f.name for f in s.fields} + assert {"stage", "message"}.issubset(names) + + +# --------------------------------------------------------------------------- +# Phase-1 Redmine Result Schemas +# --------------------------------------------------------------------------- + +def test_redmineTicketHasCoreFields(): + s = PORT_TYPE_CATALOG["RedmineTicket"] + names = {f.name for f in s.fields} + for required in ("id", "subject", "status"): + assert required in names + + +def test_redmineTicketListReferencesTicket(): + s = PORT_TYPE_CATALOG["RedmineTicketList"] + ticketsField = next((f for f in s.fields if f.name == "tickets"), None) + assert ticketsField is not None + assert ticketsField.type == "List[RedmineTicket]" + + +def test_redmineStatsExists(): + s = PORT_TYPE_CATALOG["RedmineStats"] + names = {f.name for f in s.fields} + assert "kpis" in names + + +# --------------------------------------------------------------------------- +# Phase-1 Expressions / Misc +# --------------------------------------------------------------------------- + +def test_cronExpressionExists(): + s = PORT_TYPE_CATALOG["CronExpression"] + names = {f.name for f in s.fields} + assert "expression" in names + assert "timezone" in names + + +def test_conditionExpressionHasSyntaxEnum(): + s = PORT_TYPE_CATALOG["ConditionExpression"] + syntaxField = next((f for f in s.fields if f.name == "syntax"), None) + assert syntaxField is not None + assert syntaxField.enumValues + assert "jmespath" in syntaxField.enumValues + + +def test_attachmentSpecHasSourceEnum(): + s = PORT_TYPE_CATALOG["AttachmentSpec"] + sourceField = next((f for f in s.fields if f.name == "source"), None) + assert sourceField is not None + assert set(sourceField.enumValues or []) == {"path", "document", "url"} + + +def test_taskAttachmentRefExists(): + s = PORT_TYPE_CATALOG["TaskAttachmentRef"] + names = {f.name for f in s.fields} + assert "taskId" in names + + +def test_dateTimeAndUrlSemanticPrimitivesExist(): + assert "DateTime" in PORT_TYPE_CATALOG + assert "Url" in PORT_TYPE_CATALOG + + +# --------------------------------------------------------------------------- +# Cross-cutting: every Trustee/Redmine result references FeatureInstanceRef +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("schemaName", [ + "TrusteeRefreshResult", + "TrusteeProcessResult", + "TrusteeSyncResult", + "RedmineTicket", + "RedmineTicketList", + "RedmineStats", +]) +def test_resultSchemasReferenceFeatureInstance(schemaName): + s = PORT_TYPE_CATALOG[schemaName] + fiField = next((f for f in s.fields if f.name == "featureInstance"), None) + assert fiField is not None, f"{schemaName} should expose featureInstance for traceability" + assert fiField.type == "FeatureInstanceRef" + + +# --------------------------------------------------------------------------- +# Serialization stays compatible (frontend reads model_dump output) +# --------------------------------------------------------------------------- + +def test_portFieldDumpsDiscriminatorFlag(): + f = PortField(name="x", type="str", discriminator=True) + dumped = f.model_dump() + assert dumped["discriminator"] is True + + +def test_defaultDiscriminatorIsFalse(): + f = PortField(name="x", type="str") + dumped = f.model_dump() + assert dumped["discriminator"] is False + + +def test_primitiveTypesFrozenSet(): + assert "str" in PRIMITIVE_TYPES + assert "Any" in PRIMITIVE_TYPES + assert "DoesNotExist" not in PRIMITIVE_TYPES diff --git a/tests/unit/graphicalEditor/test_port_schema_recursive.py b/tests/unit/graphicalEditor/test_port_schema_recursive.py new file mode 100644 index 00000000..b3ae22c6 --- /dev/null +++ b/tests/unit/graphicalEditor/test_port_schema_recursive.py @@ -0,0 +1,24 @@ +# Copyright (c) 2025 Patrick Motsch +"""Port type catalog: nested provenance schemas (Typed Generic Handover).""" + +from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, _defaultForType + + +def test_connection_ref_in_catalog(): + s = PORT_TYPE_CATALOG["ConnectionRef"] + names = {f.name for f in s.fields} + assert names == {"id", "authority", "label"} + + +def test_document_list_has_provenance_fields(): + s = PORT_TYPE_CATALOG["DocumentList"] + names = {f.name for f in s.fields} + assert "documents" in names + assert "connection" in names + assert "source" in names + assert "count" in names + + +def test_default_for_nested_schema_type(): + assert _defaultForType("ConnectionRef") == {} + assert _defaultForType("List[Document]") == [] diff --git a/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py b/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py new file mode 100644 index 00000000..16aec90d --- /dev/null +++ b/tests/unit/graphicalEditor/test_upstream_paths_and_graph_schema.py @@ -0,0 +1,67 @@ +# Copyright (c) 2025 Patrick Motsch +from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths +from modules.workflows.automation2.graphUtils import parse_graph_defined_schema, validateGraph +from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES + + +def test_compute_upstream_paths_includes_form_dynamic_fields(): + graph = { + "nodes": [ + { + "id": "form1", + "type": "input.form", + "parameters": { + "fields": [{"name": "custName", "type": "str", "label": "Name", "required": True}], + }, + }, + {"id": "ai1", "type": "ai.prompt", "parameters": {"aiPrompt": "hi"}}, + ], + "connections": [ + {"source": "form1", "target": "ai1", "sourceOutput": 0, "targetInput": 0}, + ], + } + paths = compute_upstream_paths(graph, "ai1") + labels = [p["label"] for p in paths if p.get("producerNodeId") == "form1"] + assert any("custName" in lbl for lbl in labels), labels + + +def test_parse_graph_defined_schema_fields(): + node = {"parameters": {"fields": [{"name": "a", "type": "str", "label": "A", "required": False}]}} + sch = parse_graph_defined_schema(node, "fields") + assert sch and sch["name"] == "FormPayload_dynamic" + assert sch["fields"][0]["name"] == "a" + + +def test_parse_graph_defined_schema_nested_group(): + node = { + "parameters": { + "fields": [ + { + "name": "addr", + "type": "group", + "label": "Addr", + "fields": [{"name": "zip", "type": "str", "label": "ZIP"}], + }, + ], + }, + } + sch = parse_graph_defined_schema(node, "fields") + names = [f["name"] for f in sch["fields"]] + assert "addr.zip" in names + + +def test_validate_graph_port_mismatch_errors(): + node_type_ids = {n["id"] for n in STATIC_NODE_TYPES} + graph = { + "nodes": [ + {"id": "t1", "type": "trigger.manual", "parameters": {}}, + {"id": "e1", "type": "email.checkEmail", "parameters": {"connectionReference": "x"}}, + {"id": "a1", "type": "ai.prompt", "parameters": {"aiPrompt": "summarize"}}, + ], + "connections": [ + {"source": "t1", "target": "e1", "sourceOutput": 0, "targetInput": 0}, + {"source": "e1", "target": "a1", "sourceOutput": 0, "targetInput": 0}, + ], + } + errors = validateGraph(graph, node_type_ids) + assert any("Port mismatch" in e for e in errors), errors diff --git a/tests/unit/methods/__init__.py b/tests/unit/methods/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/methods/test_action_signature_validator.py b/tests/unit/methods/test_action_signature_validator.py new file mode 100644 index 00000000..8e54fdcf --- /dev/null +++ b/tests/unit/methods/test_action_signature_validator.py @@ -0,0 +1,289 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Tests for the action-signature validator (Phase 2 of the Typed Action +Architecture, see wiki/c-work/1-plan/2026-04-typed-action-architecture.md). + +Two parts: + A) Unit tests for the validator itself (positive + negative cases) + B) Healthy-state test: every Method discovered by methodDiscovery passes + validation. This is the regression net that catches drift between an + action's declared types and the type catalog. +""" +from __future__ import annotations + +import pytest + +from modules.datamodels.datamodelWorkflowActions import ( + WorkflowActionDefinition, + WorkflowActionParameter, +) +from modules.shared.frontendTypes import FrontendType +from modules.workflows.methods._actionSignatureValidator import ( + _formatValidationReport, + _validateActionDefinition, + _validateActionParameter, + _validateActionsDict, + _validateMethods, + _validateTypeRef, +) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _makeParam(typeStr: str, **kwargs) -> WorkflowActionParameter: + defaults = { + "name": "p", + "type": typeStr, + "frontendType": FrontendType.TEXT, + "required": False, + "description": "", + } + defaults.update(kwargs) + return WorkflowActionParameter(**defaults) + + +def _makeAction( + actionId: str = "test.x", + parameters: dict | None = None, + outputType: str = "ActionResult", +) -> WorkflowActionDefinition: + return WorkflowActionDefinition( + actionId=actionId, + description="t", + parameters=parameters or {}, + outputType=outputType, + execute=lambda *a, **k: None, + ) + + +# --------------------------------------------------------------------------- +# A) Unit tests +# --------------------------------------------------------------------------- + +class TestValidateTypeRef: + """Single-type validation.""" + + @pytest.mark.parametrize("t", [ + "str", "int", "bool", "float", "Any", + "ConnectionRef", "FeatureInstanceRef", "DocumentList", + "TrusteeProcessResult", "RedmineTicket", + "List[str]", "List[int]", "List[Any]", + "Dict[str,Any]", "Dict[str,Document]", + "List[FeatureInstanceRef]", + ]) + def test_validTypes(self, t): + assert _validateTypeRef(t) == [] + + @pytest.mark.parametrize("t", [ + "list", # too generic + "dict", # too generic + "Foobar", # unknown schema + "List[Foo]", # unknown inner + "Dict[str,Foo]", # unknown inner value + "", # empty + ]) + def test_invalidTypes(self, t): + errors = _validateTypeRef(t) + assert errors, f"expected validation errors for {t!r}" + + +class TestValidateActionParameter: + def test_validParam(self): + p = _makeParam("ConnectionRef") + assert _validateActionParameter("ai.x", "p", p) == [] + + def test_invalidParam(self): + p = _makeParam("Foobar") + errors = _validateActionParameter("ai.x", "myParam", p) + assert errors and errors[0].startswith("ai.x.myParam:") + + +class TestValidateActionDefinition: + def test_valid(self): + action = _makeAction( + parameters={"a": _makeParam("ConnectionRef", name="a")}, + outputType="DocumentList", + ) + assert _validateActionDefinition(action) == [] + + def test_invalidOutputType(self): + action = _makeAction(outputType="DoesNotExist") + errors = _validateActionDefinition(action) + assert any("" in e for e in errors) + + def test_genericOutputAllowed(self): + # ActionResult and Transit are allowed as fire-and-forget outputs + for t in ("ActionResult", "Transit"): + assert _validateActionDefinition(_makeAction(outputType=t)) == [] + + +class TestValidateActionsDict: + def test_emptyDictOk(self): + assert _validateActionsDict("m", {}) == [] + + def test_nonActionDefinitionRejected(self): + errors = _validateActionsDict("m", {"x": "not an action"}) + assert any("not a WorkflowActionDefinition" in e for e in errors) + + def test_collectsErrorsAcrossActions(self): + actions = { + "good": _makeAction( + parameters={"a": _makeParam("str", name="a")}, + outputType="DocumentList", + ), + "bad": _makeAction( + actionId="m.bad", + parameters={"x": _makeParam("Foobar", name="x")}, + outputType="AlsoUnknown", + ), + } + errors = _validateActionsDict("m", actions) + # bad action contributes 2 errors, good contributes 0 + assert len(errors) == 2 + + +class TestValidateMethods: + def test_emptyOk(self): + assert _validateMethods([]) == [] + + def test_methodLikeObject(self): + class FakeMethod: + name = "fake" + + def __init__(self): + self._actions = { + "a": _makeAction( + parameters={"p": _makeParam("ConnectionRef", name="p")}, + outputType="DocumentList", + ), + } + + assert _validateMethods([FakeMethod()]) == [] + + def test_methodWithDrift(self): + class FakeMethod: + name = "fake" + + def __init__(self): + self._actions = { + "broken": _makeAction( + actionId="fake.broken", + parameters={"p": _makeParam("Unknown", name="p")}, + outputType="ActionResult", + ), + } + + errors = _validateMethods([FakeMethod()]) + assert errors and "fake.broken.p" in errors[0] + + +class TestFormatValidationReport: + def test_healthyMessage(self): + assert "healthy" in _formatValidationReport([]).lower() + + def test_errorReport(self): + msg = _formatValidationReport(["a.x: bad", "b.y: also bad"]) + assert "Found 2 action-signature drift" in msg + assert "a.x: bad" in msg + assert "b.y: also bad" in msg + + +# --------------------------------------------------------------------------- +# B) Healthy-state test for the real Method registry +# --------------------------------------------------------------------------- + +class _NullRbac: + """Minimal RBAC stub so MethodBase.__init__ does not crash.""" + + def getUserPermissions(self, **kwargs): # noqa: D401 + class _P: + view = True + read = True + create = True + update = True + delete = True + return _P() + + +class _StubServices: + """Minimal services container required by MethodBase.__init__.""" + + def __init__(self): + self.rbac = _NullRbac() + self.user = type("U", (), {"id": "test-user", "roleLabels": []})() + self.mandateId = None + self.featureInstanceId = None + + +def _ensureOptionalDeps(): + """Patch sys.modules with stubs for optional deps that some Methods + import at module-load time but that the test env might not have. + + This is purely so the validator can inspect the action signatures — + no real network calls happen in these tests. + """ + import sys + import types + + class _AnyAttrModule(types.ModuleType): + """Module stub that lazily creates dummy classes for any attribute, + so type annotations like `aiohttp.ClientSession` resolve.""" + + def __getattr__(self, name): # noqa: D401 + return type(name, (), {}) + + for name in ("aiohttp",): + if name not in sys.modules: + sys.modules[name] = _AnyAttrModule(name) + + +def _instantiateMethod(methodCls): + """Try to instantiate a Method with a stub services object. + + Some Methods do extra work in __init__ (e.g. helper imports). We + accept failures and return None; missing Methods are skipped. + """ + _ensureOptionalDeps() + try: + return methodCls(_StubServices()) + except Exception as exc: # pragma: no cover - environment dependent + pytest.skip(f"could not instantiate {methodCls.__name__}: {exc}") + return None + + +@pytest.mark.parametrize("modulePath,className", [ + ("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee"), + ("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine"), + ("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint"), + ("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook"), + ("modules.workflows.methods.methodAi.methodAi", "MethodAi"), + ("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup"), + ("modules.workflows.methods.methodFile.methodFile", "MethodFile"), + ("modules.workflows.methods.methodContext.methodContext", "MethodContext"), + ("modules.workflows.methods.methodJira.methodJira", "MethodJira"), + ("modules.workflows.methods.methodChatbot.methodChatbot", "MethodChatbot"), +]) +def test_methodSignaturesAreHealthy(modulePath, className): + """Each shipping Method's _actions must validate against the catalog.""" + import importlib + + try: + module = importlib.import_module(modulePath) + except ImportError as exc: + pytest.skip(f"module not importable: {exc}") + return + + cls = getattr(module, className, None) + if cls is None: + pytest.skip(f"{className} not found in {modulePath}") + return + + instance = _instantiateMethod(cls) + if instance is None: + return + + errors = _validateMethods([instance]) + assert errors == [], _formatValidationReport(errors) diff --git a/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py b/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py new file mode 100644 index 00000000..d1b6397c --- /dev/null +++ b/tests/unit/nodeDefinitions/test_trustee_schema_compliance.py @@ -0,0 +1,188 @@ +# Copyright (c) 2025 Patrick Motsch +"""Trustee node schema-compliance under the Pick-not-Push typed port system. + +Verifies that: + - All three trustee actions (extractFromFiles, processDocuments, + syncToAccounting) declare ``ActionResult`` as output, matching what the + Python implementations actually return at runtime + (``ActionResult.isSuccess(documents=[...])``). + - processDocuments / syncToAccounting accept ``ActionResult`` (the producer + schema) plus ``DocumentList`` and ``Transit`` for back-compat. + - The ``documentList`` parameter is required, typed ``List[ActionDocument]`` + (the concrete shape consumed by ``_resolveDocumentList``) and rendered via + the dataRef picker so the user can bind it to ``upstream → documents``. + - The end-to-end Trustee pipeline graph (extract -> process -> sync) passes + hard port-compat validation (validateGraph). + - actionNodeExecutor produces canonical ``documents`` field — no legacy + ``documentList`` alias — so that DataRef path=['documents'] is the single + source of truth. +""" + +import inspect + +from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES +from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG +from modules.workflows.automation2.executors import actionNodeExecutor as _actionExec +from modules.workflows.automation2.graphUtils import validateGraph + + +def _node(nodeId: str) -> dict: + return next(n for n in STATIC_NODE_TYPES if n["id"] == nodeId) + + +def test_extractFromFiles_outputs_ActionResult(): + """Runtime returns ActionResult.isSuccess(documents=[...]) — see + actions/extractFromFiles.py. The adapter must declare the same.""" + n = _node("trustee.extractFromFiles") + assert n["outputPorts"][0]["schema"] == "ActionResult" + + +def test_processDocuments_outputs_ActionResult(): + n = _node("trustee.processDocuments") + assert n["outputPorts"][0]["schema"] == "ActionResult" + + +def test_syncToAccounting_outputs_ActionResult(): + n = _node("trustee.syncToAccounting") + assert n["outputPorts"][0]["schema"] == "ActionResult" + + +def test_processDocuments_accepts_ActionResult_and_legacy(): + """processDocuments must accept ActionResult (the new producer schema for + extractFromFiles) plus DocumentList / Transit for back-compat.""" + n = _node("trustee.processDocuments") + accepts = n["inputPorts"][0]["accepts"] + assert "ActionResult" in accepts + assert "DocumentList" in accepts + assert "Transit" in accepts + assert "UdmDocument" not in accepts, ( + "UdmDocument was dropped from accepts during the Pick-not-Push schema cleanup." + ) + + +def test_syncToAccounting_accepts_ActionResult_and_legacy(): + n = _node("trustee.syncToAccounting") + accepts = n["inputPorts"][0]["accepts"] + assert "ActionResult" in accepts + assert "DocumentList" in accepts + assert "Transit" in accepts + + +def test_processDocuments_documentList_param_typed_required_dataRef(): + """documentList is a Pick-not-Push DataRef parameter — must be visible + and typed exactly like the producer field (``ActionResult.documents`` is + ``List[ActionDocument]``) so DataPicker's strict-filter accepts it. + """ + params = {p["name"]: p for p in _node("trustee.processDocuments")["parameters"]} + p = params["documentList"] + assert p["type"] == "List[ActionDocument]", ( + "documentList must declare the concrete producer type so the DataPicker " + "strict-filter resolves upstream ActionResult.documents as compatible." + ) + assert p["required"] is True + assert p["frontendType"] == "dataRef", ( + "documentList must use the dataRef renderer so the binding is visible" + ) + + +def test_syncToAccounting_documentList_param_typed_required_dataRef(): + params = {p["name"]: p for p in _node("trustee.syncToAccounting")["parameters"]} + p = params["documentList"] + assert p["type"] == "List[ActionDocument]", ( + "documentList must declare the concrete producer type so the DataPicker " + "strict-filter resolves upstream ActionResult.documents as compatible." + ) + assert p["required"] is True + assert p["frontendType"] == "dataRef", ( + "documentList must use the dataRef renderer so the binding is visible" + ) + + +def test_trustee_pipeline_graph_passes_hard_port_validation(): + """End-to-end pipeline: trigger.manual -> extract -> process -> sync. + + Mirrors what frontend_nyla/.../trusteePipelineGraph.ts builds for + _buildScanUploadGraph. Port-compat must hold without warnings. + """ + graph = { + "nodes": [ + {"id": "trigger-manual", "type": "trigger.manual", "parameters": {}}, + { + "id": "extract", + "type": "trustee.extractFromFiles", + "parameters": { + "fileIds": ["f1"], + "featureInstanceId": "inst-1", + "prompt": "", + }, + }, + { + "id": "process", + "type": "trustee.processDocuments", + "parameters": { + "documentList": {"type": "ref", "nodeId": "extract", "path": ["documents"]}, + "featureInstanceId": "inst-1", + }, + }, + { + "id": "sync", + "type": "trustee.syncToAccounting", + "parameters": { + "documentList": {"type": "ref", "nodeId": "process", "path": ["documents"]}, + "featureInstanceId": "inst-1", + }, + }, + ], + "connections": [ + {"source": "trigger-manual", "sourceOutput": 0, "target": "extract", "targetInput": 0}, + {"source": "extract", "sourceOutput": 0, "target": "process", "targetInput": 0}, + {"source": "process", "sourceOutput": 0, "target": "sync", "targetInput": 0}, + ], + } + nodeTypeIds = {n["id"] for n in STATIC_NODE_TYPES} + errors = validateGraph(graph, nodeTypeIds) + portMismatches = [e for e in errors if "Port mismatch" in e] + assert not portMismatches, f"Trustee pipeline must be port-compatible: {portMismatches}" + + +def test_catalog_ActionResult_exposes_documents_field(): + """Without ``documents`` on the ActionResult schema the DataPicker cannot + surface the canonical list-of-documents path that every downstream node + (processDocuments, syncToAccounting, AI consumers, ...) needs to bind to. + """ + schema = PORT_TYPE_CATALOG.get("ActionResult") + assert schema is not None + fieldNames = {f.name for f in schema.fields} + assert "documents" in fieldNames, ( + "ActionResult.documents must be in PORT_TYPE_CATALOG so the frontend " + "DataPicker can offer it as a bindable path." + ) + + +def test_catalog_ActionDocument_is_registered(): + """ActionResult.documents is List[ActionDocument]; the inner schema must + be registered so the picker can drill down to ``documents → * → documentName``. + """ + schema = PORT_TYPE_CATALOG.get("ActionDocument") + assert schema is not None + fieldNames = {f.name for f in schema.fields} + assert {"documentName", "documentData", "mimeType"}.issubset(fieldNames), ( + "ActionDocument schema must mirror datamodelChat.ActionDocument." + ) + + +def test_actionNodeExecutor_does_not_emit_legacy_documentList_alias(): + """Source-code assertion: out dict in execute() must not write documentList alias. + + Pick-not-Push canonicalises on ``documents``. Removing the alias prevents + DataRefs from drifting back to the legacy field name. + """ + src = inspect.getsource(_actionExec) + assert '"documentList": docsList' not in src, ( + "Legacy alias ``documentList`` must be removed from actionNodeExecutor " + "out-dict (use canonical ``documents`` only — see issues.md " + "'Trustee Schema-Compliance')." + ) + assert '"documents": docsList' in src, ( + "Canonical ``documents`` field missing from actionNodeExecutor out-dict." + ) diff --git a/tests/unit/scripts/__init__.py b/tests/unit/scripts/__init__.py new file mode 100644 index 00000000..fdcc4f0e --- /dev/null +++ b/tests/unit/scripts/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. diff --git a/tests/unit/scripts/test_migrate_feature_instance_refs.py b/tests/unit/scripts/test_migrate_feature_instance_refs.py new file mode 100644 index 00000000..80367b4e --- /dev/null +++ b/tests/unit/scripts/test_migrate_feature_instance_refs.py @@ -0,0 +1,289 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Tests for ``scripts/script_migrate_feature_instance_refs.py``. + +The script touches the live ``poweron_graphicaleditor`` DB. Tests run against +an in-memory fake psycopg2 connection so we exercise the full code path +(SELECT -> migrate -> UPDATE) without requiring a real Postgres server. +""" +from __future__ import annotations + +import importlib +import json +import sys +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import pytest + +_gatewayPath = Path(__file__).resolve().parents[3] +_scriptsPath = _gatewayPath / "scripts" +if str(_scriptsPath) not in sys.path: + sys.path.insert(0, str(_scriptsPath)) + +migrationModule = importlib.import_module("script_migrate_feature_instance_refs") + + +# --------------------------------------------------------------------------- +# Fake psycopg2 connection / cursor +# --------------------------------------------------------------------------- + +class _FakeCursor: + """Mimics enough of psycopg2's RealDictCursor + plain cursor for the script.""" + + def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]], updates: List[Tuple[str, str, Any]]): + self._rowsByTable = rowsByTable + self._updates = updates + self._lastFetch: List[Dict[str, Any]] = [] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc, tb): + return False + + def execute(self, query: str, params: Any = None): + if query.strip().upper().startswith("SELECT"): + for table, rows in self._rowsByTable.items(): + if table in query: + self._lastFetch = list(rows) + return + self._lastFetch = [] + return + if query.strip().upper().startswith("UPDATE"): + for table in self._rowsByTable: + if table in query: + graphValue, pk = params + if hasattr(graphValue, "adapted"): + graphValue = graphValue.adapted + self._updates.append((table, pk, graphValue)) + return + return + + def fetchall(self): + return self._lastFetch + + +class _FakeConn: + def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]]): + self._rowsByTable = rowsByTable + self.updates: List[Tuple[str, str, Any]] = [] + self.committed = False + self.closed = False + + def cursor(self, cursor_factory: Any = None): + return _FakeCursor(self._rowsByTable, self.updates) + + def commit(self): + self.committed = True + + def close(self): + self.closed = True + + +@pytest.fixture +def graphsByTable() -> Dict[str, List[Dict[str, Any]]]: + return { + '"Automation2Workflow"': [ + { + "pk": "wf-legacy", + "graph": { + "nodes": [ + { + "id": "n1", + "type": "trustee.processDocuments", + "parameters": {"featureInstanceId": "11111111-1111-1111-1111-111111111111"}, + }, + { + "id": "n2", + "type": "redmine.createIssue", + "parameters": {"featureInstanceId": "22222222-2222-2222-2222-222222222222"}, + }, + ], + "connections": [], + }, + }, + { + "pk": "wf-already-typed", + "graph": { + "nodes": [ + { + "id": "n1", + "type": "trustee.processDocuments", + "parameters": { + "featureInstanceId": { + "$type": "FeatureInstanceRef", + "id": "33333333-3333-3333-3333-333333333333", + "featureCode": "trustee", + } + }, + } + ], + "connections": [], + }, + }, + { + "pk": "wf-empty-graph", + "graph": {}, + }, + { + "pk": "wf-graph-as-string", + "graph": json.dumps({ + "nodes": [ + { + "id": "n1", + "type": "outlook.sendMail", + "parameters": {"featureInstanceId": "44444444-4444-4444-4444-444444444444"}, + } + ], + "connections": [], + }), + }, + ], + '"AutoVersion"': [ + { + "pk": "ver-legacy", + "graph": { + "nodes": [ + { + "id": "n1", + "type": "ai.runPrompt", + "parameters": {"featureInstanceId": "55555555-5555-5555-5555-555555555555"}, + } + ], + "connections": [], + }, + } + ], + } + + +# --------------------------------------------------------------------------- +# Helper-level tests +# --------------------------------------------------------------------------- + +class TestLoadGraph: + def test_dictPassesThrough(self): + assert migrationModule._loadGraph({"a": 1}) == {"a": 1} + + def test_jsonStringIsParsed(self): + assert migrationModule._loadGraph('{"a": 2}') == {"a": 2} + + def test_emptyOrInvalidYieldsEmptyDict(self): + assert migrationModule._loadGraph(None) == {} + assert migrationModule._loadGraph("") == {} + assert migrationModule._loadGraph("not json") == {} + + def test_bytesStringIsParsed(self): + assert migrationModule._loadGraph(b'{"a": 3}') == {"a": 3} + + +class TestCountMigrations: + def test_zeroWhenIdentical(self): + g = {"nodes": [{"id": "n", "parameters": {"featureInstanceId": "uuid"}}]} + assert migrationModule._countMigrations(g, g) == 0 + + def test_countsMigratedFields(self): + before = { + "nodes": [ + {"id": "n1", "parameters": {"featureInstanceId": "u1"}}, + {"id": "n2", "parameters": {"featureInstanceId": "u2"}}, + {"id": "n3", "parameters": {"featureInstanceId": "u3"}}, + ] + } + after = { + "nodes": [ + { + "id": "n1", + "parameters": { + "featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u1"} + }, + }, + {"id": "n2", "parameters": {"featureInstanceId": "u2"}}, + { + "id": "n3", + "parameters": { + "featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u3"} + }, + }, + ] + } + assert migrationModule._countMigrations(before, after) == 2 + + +# --------------------------------------------------------------------------- +# End-to-end migrate() tests +# --------------------------------------------------------------------------- + +class TestMigrate: + def test_dryRunDoesNotWriteOrCommit(self, monkeypatch, graphsByTable): + conn = _FakeConn(graphsByTable) + monkeypatch.setattr(migrationModule, "_connect", lambda: conn) + + summary = migrationModule.migrate(dryRun=True) + + assert conn.updates == [] + assert conn.committed is False + assert conn.closed is True + assert summary['"Automation2Workflow"']["scanned"] == 4 + assert summary['"Automation2Workflow"']["rowsChanged"] == 2 + assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3 + assert summary['"AutoVersion"']["rowsChanged"] == 1 + assert summary['"AutoVersion"']["fieldsRewritten"] == 1 + + def test_liveRunWritesAndCommits(self, monkeypatch, graphsByTable): + conn = _FakeConn(graphsByTable) + monkeypatch.setattr(migrationModule, "_connect", lambda: conn) + + summary = migrationModule.migrate(dryRun=False) + + assert conn.committed is True + assert conn.closed is True + + updatesByPk = {pk: graph for (_table, pk, graph) in conn.updates} + assert set(updatesByPk.keys()) == {"wf-legacy", "wf-graph-as-string", "ver-legacy"} + + legacyGraph = updatesByPk["wf-legacy"] + n1Param = legacyGraph["nodes"][0]["parameters"]["featureInstanceId"] + n2Param = legacyGraph["nodes"][1]["parameters"]["featureInstanceId"] + assert n1Param["$type"] == "FeatureInstanceRef" + assert n1Param["featureCode"] == "trustee" + assert n1Param["id"] == "11111111-1111-1111-1111-111111111111" + assert n2Param["featureCode"] == "redmine" + + verParam = updatesByPk["ver-legacy"]["nodes"][0]["parameters"]["featureInstanceId"] + assert verParam["featureCode"] == "ai" + + stringSourcedGraph = updatesByPk["wf-graph-as-string"] + outlookParam = stringSourcedGraph["nodes"][0]["parameters"]["featureInstanceId"] + assert outlookParam["featureCode"] == "outlook" + + assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3 + assert summary['"AutoVersion"']["fieldsRewritten"] == 1 + + def test_idempotency(self, monkeypatch, graphsByTable): + conn1 = _FakeConn(graphsByTable) + monkeypatch.setattr(migrationModule, "_connect", lambda: conn1) + migrationModule.migrate(dryRun=False) + + firstUpdates = {pk: graph for (_t, pk, graph) in conn1.updates} + nextRows = { + '"Automation2Workflow"': [ + {"pk": pk, "graph": graph} + for pk, graph in firstUpdates.items() + if pk.startswith("wf") + ], + '"AutoVersion"': [ + {"pk": pk, "graph": graph} + for pk, graph in firstUpdates.items() + if pk.startswith("ver") + ], + } + conn2 = _FakeConn(nextRows) + monkeypatch.setattr(migrationModule, "_connect", lambda: conn2) + summary2 = migrationModule.migrate(dryRun=False) + + assert conn2.updates == [] + for table, counts in summary2.items(): + assert counts["rowsChanged"] == 0, f"{table} not idempotent" + assert counts["fieldsRewritten"] == 0, f"{table} not idempotent" diff --git a/tests/unit/serviceAgent/test_action_tool_adapter_typed.py b/tests/unit/serviceAgent/test_action_tool_adapter_typed.py new file mode 100644 index 00000000..06edc01c --- /dev/null +++ b/tests/unit/serviceAgent/test_action_tool_adapter_typed.py @@ -0,0 +1,127 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Tests for the catalog-driven JSON-Schema generator in actionToolAdapter +(Phase 3 of the Typed Action Architecture). + +Covers `_catalogTypeToJsonSchema` and `_convertParameterSchema` with: + - Primitives (str/int/bool/float/Any) + - Catalog object schemas (recursive expansion, required fields, enums) + - List[X] (array with typed items) + - Dict[K,V] (object with typed additionalProperties) + - Unknown type → safe fallback (string) +""" +from __future__ import annotations + +from modules.serviceCenter.services.serviceAgent.actionToolAdapter import ( + _catalogTypeToJsonSchema, + _convertParameterSchema, +) + + +class TestPrimitives: + def test_str(self): + assert _catalogTypeToJsonSchema("str") == {"type": "string"} + + def test_int(self): + assert _catalogTypeToJsonSchema("int") == {"type": "integer"} + + def test_bool(self): + assert _catalogTypeToJsonSchema("bool") == {"type": "boolean"} + + def test_float(self): + assert _catalogTypeToJsonSchema("float") == {"type": "number"} + + def test_anyHasNoTypeField(self): + # JSON Schema "any" is best expressed as an empty schema. + assert _catalogTypeToJsonSchema("Any") == {} + + +class TestContainers: + def test_listOfPrimitive(self): + assert _catalogTypeToJsonSchema("List[str]") == { + "type": "array", + "items": {"type": "string"}, + } + + def test_listOfCatalogSchema(self): + out = _catalogTypeToJsonSchema("List[Document]") + assert out["type"] == "array" + assert out["items"]["type"] == "object" + # Recursive expansion delivered Document fields: + propsName = out["items"]["properties"].get("name", {}) + assert propsName.get("type") == "string" + + def test_dictWithPrimitiveValue(self): + assert _catalogTypeToJsonSchema("Dict[str,Any]") == { + "type": "object", + "additionalProperties": {}, + } + + def test_dictWithCatalogValue(self): + out = _catalogTypeToJsonSchema("Dict[str,Document]") + assert out["type"] == "object" + assert out["additionalProperties"]["type"] == "object" + assert "properties" in out["additionalProperties"] + + +class TestCatalogObjects: + def test_connectionRefExpands(self): + out = _catalogTypeToJsonSchema("ConnectionRef") + assert out["type"] == "object" + assert "properties" in out + # ConnectionRef has 'id' (required) and 'authority' (required, discriminator) + assert "id" in out["properties"] + assert "authority" in out["properties"] + assert "id" in out["required"] + assert "authority" in out["required"] + + def test_featureInstanceRefExpands(self): + out = _catalogTypeToJsonSchema("FeatureInstanceRef") + assert out["type"] == "object" + # mandateId is optional → must NOT be in required + assert "mandateId" not in out.get("required", []) + assert "id" in out["required"] + + def test_descriptionPreserved(self): + out = _catalogTypeToJsonSchema("ConnectionRef") + assert "description" in out + assert "ConnectionRef" in out["description"] + + +class TestUnknownFallback: + def test_unknownDefaultsToString(self): + out = _catalogTypeToJsonSchema("CompletelyUnknownType") + assert out["type"] == "string" + assert "unknown" in out.get("description", "").lower() + + def test_emptyStringDefaultsToString(self): + assert _catalogTypeToJsonSchema("") == {"type": "string"} + + +class TestConvertParameterSchema: + def test_buildsObjectSchemaWithRequiredList(self): + actionParams = { + "documentList": {"type": "DocumentList", "required": True, "description": "Eingabe"}, + "prompt": {"type": "str", "required": False, "description": "Prompt-Text"}, + } + schema = _convertParameterSchema(actionParams) + assert schema["type"] == "object" + assert "documentList" in schema["properties"] + assert "prompt" in schema["properties"] + assert schema["required"] == ["documentList"] + assert schema["properties"]["documentList"]["description"] == "Eingabe" + # documentList expands to an object schema (DocumentList is a catalog object) + assert schema["properties"]["documentList"]["type"] == "object" + + def test_handlesMalformedParamsGracefully(self): + actionParams = {"weird": "not-a-dict"} + schema = _convertParameterSchema(actionParams) + assert schema["properties"]["weird"]["type"] == "string" + + def test_typedRefProducesObjectNotString(self): + """Regression: pre-Phase-3 behaviour collapsed catalog refs to 'string'.""" + actionParams = {"connection": {"type": "ConnectionRef", "required": True}} + schema = _convertParameterSchema(actionParams) + assert schema["properties"]["connection"]["type"] == "object" + assert "id" in schema["properties"]["connection"]["properties"] diff --git a/tests/unit/teamsbot/__init__.py b/tests/unit/teamsbot/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/teamsbot/test_directorPrompts.py b/tests/unit/teamsbot/test_directorPrompts.py new file mode 100644 index 00000000..f136438a --- /dev/null +++ b/tests/unit/teamsbot/test_directorPrompts.py @@ -0,0 +1,604 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Unit tests for Teamsbot Director Prompts (Plan #5). + +Covers: +* Datamodel limits, defaults, enum-validation +* SpeechTeamsResponse needsAgent / agentReason fields +* TeamsbotService._buildPersistentDirectorContext rendering +* TeamsbotService.submitDirectorPrompt: queues, emits SSE event, returns created +* TeamsbotService._processDirectorPrompt lifecycle: + queued -> running -> succeeded/consumed (one-shot vs persistent) +* TeamsbotService._processDirectorPrompt failure path drops persistent prompt +* TeamsbotService.removePersistentPrompt +* getActiveService / _activeServices registry +* TeamsbotObjects.getActivePersistentPrompts filtering + +The TeamsbotService constructor instantiates BrowserBotConnector, which is +harmless (no network until joinMeeting). All DB / agent / SSE side-effects +are stubbed via monkeypatch. +""" + +from __future__ import annotations + +import asyncio +from typing import Any, Dict, List, Optional +from unittest.mock import MagicMock + +import pytest +from pydantic import ValidationError + +from modules.features.teamsbot import service as serviceModule +from modules.features.teamsbot.datamodelTeamsbot import ( + DIRECTOR_PROMPT_FILE_LIMIT, + DIRECTOR_PROMPT_TEXT_LIMIT, + SpeechTeamsResponse, + TeamsbotConfig, + TeamsbotDirectorPrompt, + TeamsbotDirectorPromptCreateRequest, + TeamsbotDirectorPromptMode, + TeamsbotDirectorPromptStatus, +) +from modules.features.teamsbot.service import ( + TeamsbotService, + _activeServices, + _sessionEvents, + getActiveService, +) + + +# ============================================================================ +# Helpers +# ============================================================================ + +class _FakeUser: + """Minimal stand-in for modules.datamodels.datamodelUam.User used by the + service layer. TeamsbotService only needs ``id`` for logging / interface + keying.""" + + def __init__(self, userId: str = "user-op-1") -> None: + self.id = userId + + +class _FakeInterface: + """In-memory stand-in for TeamsbotObjects (only the director-prompt API). + + Behaves like the real DB interface for the calls used by the service: + ``createDirectorPrompt``, ``updateDirectorPrompt``, ``getDirectorPrompt``, + ``getActivePersistentPrompts``, ``getActiveSystemBot``. + """ + + def __init__(self) -> None: + self.prompts: Dict[str, Dict[str, Any]] = {} + self.created: List[Dict[str, Any]] = [] + self.updates: List[Dict[str, Any]] = [] + self.deleted: List[str] = [] + + def createDirectorPrompt(self, data: Dict[str, Any]) -> Dict[str, Any]: + record = dict(data) + if "id" not in record: + record["id"] = f"prompt-{len(self.prompts)+1}" + self.prompts[record["id"]] = record + self.created.append(record) + return record + + def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if promptId not in self.prompts: + return None + self.prompts[promptId].update(updates) + self.updates.append({"id": promptId, **updates}) + return self.prompts[promptId] + + def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]: + return self.prompts.get(promptId) + + def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]: + terminal = { + TeamsbotDirectorPromptStatus.CONSUMED.value, + TeamsbotDirectorPromptStatus.FAILED.value, + } + return [ + p + for p in self.prompts.values() + if p.get("sessionId") == sessionId + and p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value + and p.get("status") not in terminal + ] + + def getActiveSystemBot(self, mandateId: str) -> Optional[Dict[str, Any]]: + return None + + +class _CapturedEvents(list): + """Helper to collect SSE events emitted by ``_emitSessionEvent``.""" + + async def append_event(self, sessionId: str, eventType: str, data: Any) -> None: + self.append({"sessionId": sessionId, "type": eventType, "data": data}) + + +def _patchInterface(monkeypatch: pytest.MonkeyPatch, fakeInterface: _FakeInterface) -> None: + """Replace ``getInterface`` in the teamsbot service module so the service + talks to our in-memory fake instead of PostgreSQL.""" + from modules.features.teamsbot import interfaceFeatureTeamsbot as interfaceDb + + monkeypatch.setattr(interfaceDb, "getInterface", lambda *args, **kwargs: fakeInterface) + + +def _patchEmit(monkeypatch: pytest.MonkeyPatch) -> _CapturedEvents: + captured = _CapturedEvents() + + async def _stubEmit(sessionId: str, eventType: str, data: Any) -> None: + await captured.append_event(sessionId, eventType, data) + + monkeypatch.setattr(serviceModule, "_emitSessionEvent", _stubEmit) + return captured + + +def _buildService() -> TeamsbotService: + """Build a TeamsbotService with a minimal config. BrowserBotConnector is + instantiated but never reached in these tests.""" + config = TeamsbotConfig(botName="UnitTest Bot") + svc = TeamsbotService( + currentUser=_FakeUser(), + mandateId="mandate-x", + instanceId="instance-y", + config=config, + ) + svc._activeSessionId = "session-1" + return svc + + +@pytest.fixture(autouse=True) +def _resetGlobals(): + """Avoid cross-test bleed in module-level globals.""" + _activeServices.clear() + _sessionEvents.clear() + yield + _activeServices.clear() + _sessionEvents.clear() + + +# ============================================================================ +# 1) Datamodel +# ============================================================================ + +class TestDirectorPromptDatamodel: + def test_directorPromptDefaults(self): + prompt = TeamsbotDirectorPrompt( + sessionId="s1", + instanceId="i1", + operatorUserId="u1", + text="Hello world", + ) + assert prompt.mode == TeamsbotDirectorPromptMode.ONE_SHOT + assert prompt.status == TeamsbotDirectorPromptStatus.QUEUED + assert prompt.fileIds == [] + assert prompt.consumedAt is None + assert prompt.responseText is None + assert prompt.id # uuid auto-filled + assert prompt.createdAt # iso timestamp auto-filled + + def test_directorPromptTextLimitEnforced(self): + with pytest.raises(ValidationError): + TeamsbotDirectorPrompt( + sessionId="s1", + instanceId="i1", + operatorUserId="u1", + text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1), + ) + + def test_directorPromptCreateRequestDefaults(self): + body = TeamsbotDirectorPromptCreateRequest(text="quick prompt") + assert body.mode == TeamsbotDirectorPromptMode.ONE_SHOT + assert body.fileIds == [] + + def test_directorPromptCreateRequestEmptyTextRejected(self): + with pytest.raises(ValidationError): + TeamsbotDirectorPromptCreateRequest(text="") + + def test_directorPromptCreateRequestTooLongRejected(self): + with pytest.raises(ValidationError): + TeamsbotDirectorPromptCreateRequest(text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1)) + + def test_directorPromptStatusEnum(self): + assert TeamsbotDirectorPromptStatus.QUEUED.value == "queued" + assert TeamsbotDirectorPromptStatus.RUNNING.value == "running" + assert TeamsbotDirectorPromptStatus.SUCCEEDED.value == "succeeded" + assert TeamsbotDirectorPromptStatus.CONSUMED.value == "consumed" + assert TeamsbotDirectorPromptStatus.FAILED.value == "failed" + + def test_directorPromptModeEnum(self): + assert TeamsbotDirectorPromptMode.ONE_SHOT.value == "oneShot" + assert TeamsbotDirectorPromptMode.PERSISTENT.value == "persistent" + + def test_fileLimitConstantHasSaneValue(self): + assert DIRECTOR_PROMPT_FILE_LIMIT == 10 + assert DIRECTOR_PROMPT_TEXT_LIMIT == 8000 + + +class TestSpeechTeamsResponseHybrid: + def test_needsAgentDefaultFalse(self): + resp = SpeechTeamsResponse(shouldRespond=False) + assert resp.needsAgent is False + assert resp.agentReason is None + + def test_needsAgentEscalation(self): + resp = SpeechTeamsResponse( + shouldRespond=True, + responseText="Moment, ich recherchiere.", + needsAgent=True, + agentReason="webSearch SBB Schweiz", + detectedIntent="addressed", + ) + assert resp.needsAgent is True + assert resp.agentReason == "webSearch SBB Schweiz" + + +# ============================================================================ +# 2) Persistent Director Context Renderer +# ============================================================================ + +class TestBuildPersistentDirectorContext: + def test_emptyWhenNoPrompts(self): + svc = _buildService() + svc._activePersistentPrompts = [] + assert svc._buildPersistentDirectorContext() == "" + + def test_singlePrompt(self): + svc = _buildService() + svc._activePersistentPrompts = [ + {"id": "p1", "text": "Antworte immer in Englisch."}, + ] + rendered = svc._buildPersistentDirectorContext() + assert "OPERATOR_DIRECTIVES" in rendered + assert "- Antworte immer in Englisch." in rendered + assert "private" in rendered + + def test_skipsBlankText(self): + svc = _buildService() + svc._activePersistentPrompts = [ + {"id": "p1", "text": " "}, + {"id": "p2", "text": "Sei hoeflich."}, + ] + rendered = svc._buildPersistentDirectorContext() + assert "- Sei hoeflich." in rendered + assert "p1" not in rendered # the blank one is filtered out + + def test_allBlankPromptsResultInEmpty(self): + svc = _buildService() + svc._activePersistentPrompts = [ + {"id": "p1", "text": ""}, + {"id": "p2", "text": " "}, + ] + assert svc._buildPersistentDirectorContext() == "" + + +# ============================================================================ +# 3) submitDirectorPrompt +# ============================================================================ + +class TestSubmitDirectorPrompt: + @pytest.mark.asyncio + async def test_oneShotQueuesAndEmits(self, monkeypatch): + fake = _FakeInterface() + events = _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + # Block the auto-process task from running, otherwise it would call + # the real agent service. We replace the coroutine factory. + async def _noProcess(prompt): + return None + + svc = _buildService() + monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess) + + created = await svc.submitDirectorPrompt( + sessionId="session-1", + operatorUserId="user-op-1", + text="Recherchier das im Internet.", + mode=TeamsbotDirectorPromptMode.ONE_SHOT, + fileIds=[], + ) + + assert created["status"] == TeamsbotDirectorPromptStatus.QUEUED.value + assert created["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value + assert created["text"] == "Recherchier das im Internet." + assert created["sessionId"] == "session-1" + assert created["instanceId"] == "instance-y" + assert created["operatorUserId"] == "user-op-1" + + # SSE event with the queued lifecycle marker + assert any( + e["type"] == "directorPrompt" + and e["data"]["status"] == TeamsbotDirectorPromptStatus.QUEUED.value + and e["data"]["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value + for e in events + ) + + # In-memory persistent registry remains empty for one-shot. + assert svc._activePersistentPrompts == [] + + # Allow the (no-op) background task to settle so the loop is clean. + await asyncio.sleep(0) + + @pytest.mark.asyncio + async def test_persistentPromptAppendsToInMemoryRegistry(self, monkeypatch): + fake = _FakeInterface() + _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + async def _noProcess(prompt): + return None + + svc = _buildService() + monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess) + + created = await svc.submitDirectorPrompt( + sessionId="session-1", + operatorUserId="user-op-1", + text="Antworte immer in Englisch.", + mode=TeamsbotDirectorPromptMode.PERSISTENT, + fileIds=["file-a", "file-b"], + ) + + assert created["mode"] == TeamsbotDirectorPromptMode.PERSISTENT.value + assert created["fileIds"] == ["file-a", "file-b"] + assert len(svc._activePersistentPrompts) == 1 + assert svc._activePersistentPrompts[0]["id"] == created["id"] + + await asyncio.sleep(0) + + +# ============================================================================ +# 4) _processDirectorPrompt lifecycle +# ============================================================================ + +class TestProcessDirectorPromptLifecycle: + @pytest.mark.asyncio + async def test_oneShotSuccessTransitionsRunningThenConsumed(self, monkeypatch): + fake = _FakeInterface() + prompt = fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + id="prompt-success-1", + sessionId="session-1", + instanceId="instance-y", + operatorUserId="user-op-1", + text="Was ist die Hauptstadt von Frankreich?", + mode=TeamsbotDirectorPromptMode.ONE_SHOT, + ).model_dump() + ) + events = _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + svc = _buildService() + + async def _stubAgent(**kwargs): + return "Paris." + + monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent) + + await svc._processDirectorPrompt(prompt) + + statuses = [u.get("status") for u in fake.updates if u["id"] == prompt["id"]] + assert TeamsbotDirectorPromptStatus.RUNNING.value in statuses + assert TeamsbotDirectorPromptStatus.CONSUMED.value in statuses + + final = fake.prompts[prompt["id"]] + assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value + assert final["responseText"] == "Paris." + assert final.get("consumedAt") + + emittedStatuses = [ + e["data"].get("status") for e in events if e["type"] == "directorPrompt" + ] + assert TeamsbotDirectorPromptStatus.RUNNING.value in emittedStatuses + assert TeamsbotDirectorPromptStatus.CONSUMED.value in emittedStatuses + + @pytest.mark.asyncio + async def test_persistentSuccessStaysSucceededNotConsumed(self, monkeypatch): + fake = _FakeInterface() + prompt = fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + id="prompt-persist-1", + sessionId="session-1", + instanceId="instance-y", + operatorUserId="user-op-1", + text="Antworte immer in Englisch.", + mode=TeamsbotDirectorPromptMode.PERSISTENT, + ).model_dump() + ) + _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + svc = _buildService() + + async def _stubAgent(**kwargs): + return "Acknowledged." + + monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent) + + await svc._processDirectorPrompt(prompt) + + final = fake.prompts[prompt["id"]] + assert final["status"] == TeamsbotDirectorPromptStatus.SUCCEEDED.value + assert final["responseText"] == "Acknowledged." + # Persistent prompts must stay alive beyond the run. + assert final.get("consumedAt") is None + + @pytest.mark.asyncio + async def test_failureMarksFailedAndDropsFromActivePersistent(self, monkeypatch): + fake = _FakeInterface() + prompt = fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + id="prompt-fail-1", + sessionId="session-1", + instanceId="instance-y", + operatorUserId="user-op-1", + text="Mach was Komplexes.", + mode=TeamsbotDirectorPromptMode.PERSISTENT, + ).model_dump() + ) + events = _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + svc = _buildService() + svc._activePersistentPrompts = [prompt] + + async def _stubAgentBoom(**kwargs): + raise RuntimeError("agent down") + + monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgentBoom) + + await svc._processDirectorPrompt(prompt) + + final = fake.prompts[prompt["id"]] + assert final["status"] == TeamsbotDirectorPromptStatus.FAILED.value + assert "RuntimeError" in (final.get("statusMessage") or "") + + # The failed persistent prompt is removed from the in-memory directives. + assert all(p["id"] != prompt["id"] for p in svc._activePersistentPrompts) + + emittedStatuses = [ + e["data"].get("status") for e in events if e["type"] == "directorPrompt" + ] + assert TeamsbotDirectorPromptStatus.FAILED.value in emittedStatuses + + +# ============================================================================ +# 5) removePersistentPrompt +# ============================================================================ + +class TestRemovePersistentPrompt: + @pytest.mark.asyncio + async def test_removePersistentPromptMarksConsumedAndDrops(self, monkeypatch): + fake = _FakeInterface() + prompt = fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + id="prompt-rm-1", + sessionId="session-1", + instanceId="instance-y", + operatorUserId="user-op-1", + text="Bleib hoeflich.", + mode=TeamsbotDirectorPromptMode.PERSISTENT, + status=TeamsbotDirectorPromptStatus.SUCCEEDED, + ).model_dump() + ) + events = _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + + svc = _buildService() + svc._activePersistentPrompts = [prompt] + + ok = await svc.removePersistentPrompt(prompt["id"]) + assert ok is True + + final = fake.prompts[prompt["id"]] + assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value + assert final.get("consumedAt") + assert final.get("statusMessage") == "Removed by operator" + assert svc._activePersistentPrompts == [] + + assert any( + e["type"] == "directorPrompt" + and e["data"].get("removed") is True + and e["data"].get("status") == TeamsbotDirectorPromptStatus.CONSUMED.value + for e in events + ) + + @pytest.mark.asyncio + async def test_removeUnknownPromptReturnsFalse(self, monkeypatch): + fake = _FakeInterface() + _patchEmit(monkeypatch) + _patchInterface(monkeypatch, fake) + svc = _buildService() + ok = await svc.removePersistentPrompt("unknown-id") + assert ok is False + + +# ============================================================================ +# 6) _activeServices Registry +# ============================================================================ + +class TestActiveServicesRegistry: + def test_getActiveServiceReturnsNoneByDefault(self): + assert getActiveService("not-active") is None + + def test_getActiveServiceReturnsRegistered(self): + svc = _buildService() + _activeServices["session-XYZ"] = svc + assert getActiveService("session-XYZ") is svc + + def test_distinctSessionsMapToDistinctServices(self): + a = _buildService() + b = _buildService() + _activeServices["s1"] = a + _activeServices["s2"] = b + assert getActiveService("s1") is a + assert getActiveService("s2") is b + assert getActiveService("s1") is not getActiveService("s2") + + +# ============================================================================ +# 7) Interface-level filtering for active persistent prompts +# ============================================================================ + +class TestGetActivePersistentPromptsFiltering: + """The interface-level helper is the source of truth for what gets + re-loaded into _activePersistentPrompts on (re)connect.""" + + def test_onlyPersistentNonTerminal(self): + fake = _FakeInterface() + # All four lifecycle states for the same session + for status in TeamsbotDirectorPromptStatus: + fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + sessionId="s1", + instanceId="i1", + operatorUserId="u1", + text=f"persist-{status.value}", + mode=TeamsbotDirectorPromptMode.PERSISTENT, + status=status, + ).model_dump() + ) + # one-shot persistent-failure-irrelevant + fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + sessionId="s1", + instanceId="i1", + operatorUserId="u1", + text="oneShot-running", + mode=TeamsbotDirectorPromptMode.ONE_SHOT, + status=TeamsbotDirectorPromptStatus.RUNNING, + ).model_dump() + ) + + active = fake.getActivePersistentPrompts("s1") + statuses = {p.get("status") for p in active} + + # CONSUMED and FAILED are terminal; ONE_SHOT is not persistent. + assert TeamsbotDirectorPromptStatus.CONSUMED.value not in statuses + assert TeamsbotDirectorPromptStatus.FAILED.value not in statuses + # All returned prompts are persistent + assert all( + p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value for p in active + ) + # Non-terminal persistent: QUEUED, RUNNING, SUCCEEDED -> 3 records + assert len(active) == 3 + + def test_filtersBySession(self): + fake = _FakeInterface() + fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + sessionId="s1", instanceId="i1", operatorUserId="u1", + text="A", mode=TeamsbotDirectorPromptMode.PERSISTENT, + ).model_dump() + ) + fake.createDirectorPrompt( + TeamsbotDirectorPrompt( + sessionId="s2", instanceId="i1", operatorUserId="u1", + text="B", mode=TeamsbotDirectorPromptMode.PERSISTENT, + ).model_dump() + ) + assert len(fake.getActivePersistentPrompts("s1")) == 1 + assert len(fake.getActivePersistentPrompts("s2")) == 1 + assert fake.getActivePersistentPrompts("ghost") == [] diff --git a/tests/unit/workflow/test_phase3_context_node.py b/tests/unit/workflow/test_phase3_context_node.py index 300d861f..7172c6e7 100644 --- a/tests/unit/workflow/test_phase3_context_node.py +++ b/tests/unit/workflow/test_phase3_context_node.py @@ -2,12 +2,11 @@ import pytest from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES -from modules.features.graphicalEditor.portTypes import ( - PORT_TYPE_CATALOG, - INPUT_EXTRACTORS, - _extractUdmDocument, - _extractUdmNodeList, - _extractConsolidateResult, +from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG +from modules.workflows.automation2.udmUpstreamShapes import ( + _coerceConsolidateResultInput, + _coerceUdmDocumentInput, + _coerceUdmNodeListInput, ) @@ -32,34 +31,28 @@ def test_udm_port_types_registered(): assert "ConsolidateResult" in PORT_TYPE_CATALOG -def test_udm_extractors_registered(): - assert "UdmDocument" in INPUT_EXTRACTORS - assert "UdmNodeList" in INPUT_EXTRACTORS - assert "ConsolidateResult" in INPUT_EXTRACTORS - - -def test_extractUdmDocument_from_direct(): +def test_coerceUdmDocument_from_direct(): upstream = {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []} - result = _extractUdmDocument(upstream) + result = _coerceUdmDocumentInput(upstream) assert result["sourceType"] == "pdf" -def test_extractUdmDocument_from_nested(): +def test_coerceUdmDocument_from_nested(): upstream = {"udm": {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}, "other": 1} - result = _extractUdmDocument(upstream) + result = _coerceUdmDocumentInput(upstream) assert result["sourceType"] == "pdf" -def test_extractUdmNodeList(): +def test_coerceUdmNodeList(): upstream = {"nodes": [{"id": "n1"}, {"id": "n2"}], "count": 2} - result = _extractUdmNodeList(upstream) + result = _coerceUdmNodeListInput(upstream) assert result["count"] == 2 assert len(result["nodes"]) == 2 -def test_extractConsolidateResult(): +def test_coerceConsolidateResult(): upstream = {"result": {"headers": [], "rows": []}, "mode": "table", "count": 3} - result = _extractConsolidateResult(upstream) + result = _coerceConsolidateResultInput(upstream) assert result["mode"] == "table" assert result["count"] == 3 diff --git a/tests/unit/workflows/test_automation2_graphUtils.py b/tests/unit/workflows/test_automation2_graphUtils.py index 78077987..ff5df2cc 100644 --- a/tests/unit/workflows/test_automation2_graphUtils.py +++ b/tests/unit/workflows/test_automation2_graphUtils.py @@ -65,3 +65,102 @@ class TestResolveParameterReferences: node_outputs = {"n1": {"country": "CH"}} value = "Land: {{n1.country}}" assert resolveParameterReferences(value, node_outputs) == "Land: CH" + + +class TestWildcardIteration: + """Phase-4 typed Bindings-Resolver: ``*`` segment iterates over a list. + + Path semantics: + ["docs", "*", "name"] ⇒ map "name" over each item in docs + ["docs", "*"] ⇒ the docs list itself (after passing through *) + Drops items whose remainder resolves to ``None`` (missing field). + """ + + def test_wildcard_maps_over_list_to_field(self): + node_outputs = { + "src": { + "documents": [ + {"name": "a.pdf", "size": 10}, + {"name": "b.pdf", "size": 20}, + ], + } + } + value = { + "type": "ref", + "nodeId": "src", + "path": ["documents", "*", "name"], + } + assert resolveParameterReferences(value, node_outputs) == ["a.pdf", "b.pdf"] + + def test_wildcard_terminal_returns_list_copy(self): + node_outputs = {"src": {"items": ["x", "y", "z"]}} + value = {"type": "ref", "nodeId": "src", "path": ["items", "*"]} + assert resolveParameterReferences(value, node_outputs) == ["x", "y", "z"] + + def test_wildcard_drops_missing_fields(self): + node_outputs = { + "src": { + "rows": [ + {"name": "a"}, + {"otherField": 1}, + {"name": "c"}, + ] + } + } + value = {"type": "ref", "nodeId": "src", "path": ["rows", "*", "name"]} + assert resolveParameterReferences(value, node_outputs) == ["a", "c"] + + def test_wildcard_on_non_list_returns_none(self): + node_outputs = {"src": {"docs": {"not": "a list"}}} + value = {"type": "ref", "nodeId": "src", "path": ["docs", "*", "name"]} + assert resolveParameterReferences(value, node_outputs) is None + + def test_wildcard_nested(self): + node_outputs = { + "src": { + "groups": [ + {"items": [{"v": 1}, {"v": 2}]}, + {"items": [{"v": 3}]}, + ] + } + } + value = { + "type": "ref", + "nodeId": "src", + "path": ["groups", "*", "items", "*", "v"], + } + assert resolveParameterReferences(value, node_outputs) == [[1, 2], [3]] + + def test_wildcard_inside_transit_envelope(self): + node_outputs = { + "src": { + "_transit": True, + "data": {"documents": [{"name": "p.pdf"}, {"name": "q.pdf"}]}, + } + } + value = { + "type": "ref", + "nodeId": "src", + "path": ["documents", "*", "name"], + } + assert resolveParameterReferences(value, node_outputs) == ["p.pdf", "q.pdf"] + + +class TestPathContainsWildcard: + """``_pathContainsWildcard`` lets the engine decide between a scalar bind + and an iteration target (e.g. wrap a Loop container around the consumer). + """ + + def test_detects_wildcard(self): + from modules.workflows.automation2.graphUtils import _pathContainsWildcard + assert _pathContainsWildcard(["docs", "*", "name"]) is True + assert _pathContainsWildcard(["*"]) is True + + def test_no_wildcard(self): + from modules.workflows.automation2.graphUtils import _pathContainsWildcard + assert _pathContainsWildcard(["docs", 0, "name"]) is False + assert _pathContainsWildcard([]) is False + + def test_literal_star_in_int_segment_does_not_match(self): + from modules.workflows.automation2.graphUtils import _pathContainsWildcard + assert _pathContainsWildcard([1, 2, 3]) is False diff --git a/tests/unit/workflows/test_featureInstanceRefMigration.py b/tests/unit/workflows/test_featureInstanceRefMigration.py new file mode 100644 index 00000000..573f7b66 --- /dev/null +++ b/tests/unit/workflows/test_featureInstanceRefMigration.py @@ -0,0 +1,310 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Phase-5 Schicht-4 — unit tests for ``materializeFeatureInstanceRefs`` and the +runtime envelope unwrap in ``graphUtils.resolveParameterReferences``. + +Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md`` (T11). +""" +from __future__ import annotations + +import copy + +import pytest + +from modules.workflows.automation2.featureInstanceRefMigration import ( + materializeFeatureInstanceRefs, +) +from modules.workflows.automation2.graphUtils import ( + _isTypedRefEnvelope, + _unwrapTypedRef, + resolveParameterReferences, +) + + +# --------------------------------------------------------------------------- +# Migration: raw UUID -> typed envelope +# --------------------------------------------------------------------------- + + +class TestMaterializeFeatureInstanceRefs: + def test_emptyGraphIsReturnedAsIs(self): + out = materializeFeatureInstanceRefs({}) + assert out == {} + + def test_nonDictInputIsPassthrough(self): + # Defensive: callers may pass a None / list by accident. + assert materializeFeatureInstanceRefs(None) is None + assert materializeFeatureInstanceRefs([]) == [] + + def test_graphWithoutFeatureInstanceIdIsUnchanged(self): + graph = {"nodes": [{"id": "n1", "type": "trigger.manual", "parameters": {}}]} + original = copy.deepcopy(graph) + out = materializeFeatureInstanceRefs(graph) + assert out == original + + def test_inputIsNotMutated(self): + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": "abc-123"}, + } + ] + } + snapshot = copy.deepcopy(graph) + materializeFeatureInstanceRefs(graph) + assert graph == snapshot + + def test_rawUuidIsConvertedToEnvelope(self): + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": "abc-123"}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + param = out["nodes"][0]["parameters"]["featureInstanceId"] + assert param == { + "$type": "FeatureInstanceRef", + "id": "abc-123", + "featureCode": "trustee", + } + + def test_rawUuidPreservedWhitespaceIsTrimmed(self): + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": " abc-123 "}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + assert out["nodes"][0]["parameters"]["featureInstanceId"]["id"] == "abc-123" + + def test_emptyStringIsLeftUntouched(self): + # Empty featureInstanceId is the editor placeholder for "not yet bound"; + # the migration must NOT pretend an empty value is a real UUID. + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": ""}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + assert out["nodes"][0]["parameters"]["featureInstanceId"] == "" + + def test_alreadyTypedEnvelopeIsIdempotent(self): + envelope = { + "$type": "FeatureInstanceRef", + "id": "abc-123", + "featureCode": "trustee", + } + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": envelope}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + assert out["nodes"][0]["parameters"]["featureInstanceId"] == envelope + + def test_runMigrationTwiceProducesSameResult(self): + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": "abc-123"}, + } + ] + } + once = materializeFeatureInstanceRefs(graph) + twice = materializeFeatureInstanceRefs(once) + assert once == twice + + @pytest.mark.parametrize( + "nodeType,expectedFeatureCode", + [ + ("trustee.extractFromFiles", "trustee"), + ("trustee.processDocuments", "trustee"), + ("redmine.createIssue", "redmine"), + ("clickup.createTask", "clickup"), + ("sharepoint.listFiles", "sharepoint"), + ("outlook.readEmails", "outlook"), + ("email.searchEmail", "outlook"), + ], + ) + def test_featureCodeIsDerivedFromNodeTypePrefix( + self, nodeType, expectedFeatureCode + ): + graph = { + "nodes": [ + { + "id": "n", + "type": nodeType, + "parameters": {"featureInstanceId": "uuid-x"}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + env = out["nodes"][0]["parameters"]["featureInstanceId"] + assert env["featureCode"] == expectedFeatureCode + + def test_unknownNodeTypePrefixOmitsFeatureCode(self): + graph = { + "nodes": [ + { + "id": "n", + "type": "weird.unknown.action", + "parameters": {"featureInstanceId": "uuid-x"}, + } + ] + } + out = materializeFeatureInstanceRefs(graph) + env = out["nodes"][0]["parameters"]["featureInstanceId"] + assert env == {"$type": "FeatureInstanceRef", "id": "uuid-x"} + + def test_multipleNodesAreAllMigrated(self): + graph = { + "nodes": [ + { + "id": "n5", + "type": "trustee.extractFromFiles", + "parameters": {"featureInstanceId": "uuid-1"}, + }, + { + "id": "n6", + "type": "trustee.queryData", + "parameters": {"featureInstanceId": "uuid-2"}, + }, + { + "id": "n9", + "type": "trustee.processDocuments", + "parameters": {"featureInstanceId": "uuid-3"}, + }, + ] + } + out = materializeFeatureInstanceRefs(graph) + ids = [n["parameters"]["featureInstanceId"]["id"] for n in out["nodes"]] + assert ids == ["uuid-1", "uuid-2", "uuid-3"] + + def test_nodesWithoutParametersAreSkipped(self): + graph = { + "nodes": [ + {"id": "n1", "type": "trigger.manual"}, + {"id": "n2", "type": "trustee.queryData"}, # no parameters key + { + "id": "n3", + "type": "trustee.processDocuments", + "parameters": None, + }, + ] + } + out = materializeFeatureInstanceRefs(graph) + assert out == graph + + +# --------------------------------------------------------------------------- +# Runtime envelope unwrap (graphUtils._unwrapTypedRef + resolveParameterReferences) +# --------------------------------------------------------------------------- + + +class TestIsTypedRefEnvelope: + def test_recognisesFeatureInstanceRef(self): + env = {"$type": "FeatureInstanceRef", "id": "abc"} + assert _isTypedRefEnvelope(env) is True + + def test_recognisesConnectionRef(self): + env = {"$type": "ConnectionRef", "id": "abc"} + assert _isTypedRefEnvelope(env) is True + + def test_rejectsRawDict(self): + assert _isTypedRefEnvelope({"id": "abc"}) is False + + def test_rejectsUnknownType(self): + assert _isTypedRefEnvelope({"$type": "Foobar", "id": "abc"}) is False + + def test_rejectsNonDict(self): + assert _isTypedRefEnvelope("abc") is False + assert _isTypedRefEnvelope(None) is False + assert _isTypedRefEnvelope(["abc"]) is False + + +class TestUnwrapTypedRef: + def test_unwrapsFeatureInstanceRefToId(self): + env = {"$type": "FeatureInstanceRef", "id": "uuid-x", "featureCode": "trustee"} + assert _unwrapTypedRef(env) == "uuid-x" + + def test_unwrapsConnectionRefToId(self): + env = {"$type": "ConnectionRef", "id": "conn-y", "authority": "msft"} + assert _unwrapTypedRef(env) == "conn-y" + + def test_unwrapsSharePointFileRefToFilePath(self): + env = {"$type": "SharePointFileRef", "filePath": "/Sites/X/file.pdf"} + assert _unwrapTypedRef(env) == "/Sites/X/file.pdf" + + def test_passthroughForNonEnvelope(self): + assert _unwrapTypedRef("plain-string") == "plain-string" + assert _unwrapTypedRef({"id": "abc"}) == {"id": "abc"} + assert _unwrapTypedRef(None) is None + + def test_returnsEnvelopeIfPrimaryFieldMissing(self): + # Defensive: malformed envelope without ``id`` falls back to itself + # rather than silently dropping data. + env = {"$type": "FeatureInstanceRef", "featureCode": "trustee"} + assert _unwrapTypedRef(env) == env + + +class TestResolveParameterReferencesUnwrap: + def test_typedEnvelopeAtTopLevelIsUnwrapped(self): + env = {"$type": "FeatureInstanceRef", "id": "uuid-z", "featureCode": "trustee"} + out = resolveParameterReferences(env, nodeOutputs={}) + assert out == "uuid-z" + + def test_typedEnvelopeNestedInDictIsUnwrapped(self): + params = { + "featureInstanceId": { + "$type": "FeatureInstanceRef", + "id": "uuid-z", + "featureCode": "trustee", + }, + "mode": "lookup", + } + out = resolveParameterReferences(params, nodeOutputs={}) + assert out == {"featureInstanceId": "uuid-z", "mode": "lookup"} + + def test_typedEnvelopesInListAreUnwrappedElementwise(self): + params = [ + {"$type": "FeatureInstanceRef", "id": "u1"}, + {"$type": "FeatureInstanceRef", "id": "u2"}, + "static", + ] + out = resolveParameterReferences(params, nodeOutputs={}) + assert out == ["u1", "u2", "static"] + + def test_typedEnvelopeIsResolvedBeforeRefLookup(self): + # If a workflow somehow contains both shapes, the typed envelope wins; + # ref-resolution is for upstream-bound DataRefs which never carry + # ``$type`` at the top level. + env = { + "$type": "FeatureInstanceRef", + "id": "uuid-z", + # nonsensical ``type: ref`` shadow — must be ignored. + "type": "ref", + "nodeId": "nope", + "path": ["whatever"], + } + out = resolveParameterReferences(env, nodeOutputs={"nope": {"whatever": "x"}}) + assert out == "uuid-z" From 24f0c3e2ebec7dd27d40dfb732a40de13079c6d3 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 26 Apr 2026 08:31:35 +0200 Subject: [PATCH 3/7] cleanup internal marked exports --- app.py | 14 +- modules/connectors/connectorDbPostgre.py | 98 ++++--- modules/datamodels/datamodelBase.py | 12 +- modules/datamodels/datamodelFiles.py | 6 +- modules/datamodels/datamodelMembership.py | 22 +- modules/datamodels/datamodelRbac.py | 12 +- modules/datamodels/datamodelSubscription.py | 2 +- modules/datamodels/datamodelUam.py | 6 +- modules/datamodels/datamodelUdm.py | 6 +- modules/datamodels/datamodelUtils.py | 2 + modules/demoConfigs/__init__.py | 8 +- modules/demoConfigs/investorDemo2026.py | 8 +- modules/demoConfigs/pwgDemo2026.py | 8 +- .../chatbot/interfaceFeatureChatbot.py | 12 +- .../features/chatbot/routeFeatureChatbot.py | 27 +- .../commcoach/routeFeatureCommcoach.py | 10 +- .../features/commcoach/serviceCommcoach.py | 22 +- .../commcoach/serviceCommcoachScheduler.py | 6 +- .../datamodelFeatureGraphicalEditor.py | 13 +- .../features/graphicalEditor/nodeAdapter.py | 4 +- .../nodeDefinitions/redmine.py | 31 ++- .../nodeDefinitions/trustee.py | 38 +-- .../features/graphicalEditor/nodeRegistry.py | 4 +- modules/features/graphicalEditor/portTypes.py | 20 +- .../routeFeatureGraphicalEditor.py | 77 +++++- modules/features/redmine/serviceRedmine.py | 4 +- .../features/redmine/serviceRedmineStats.py | 4 +- .../redmine/serviceRedmineStatsCache.py | 2 +- .../features/redmine/serviceRedmineSync.py | 8 +- .../features/teamsbot/routeFeatureTeamsbot.py | 18 +- modules/features/teamsbot/service.py | 44 +-- .../trustee/accounting/accountingBridge.py | 4 +- .../accounting/accountingConnectorBase.py | 45 +++ .../trustee/accounting/accountingDataSync.py | 236 +++++++++++++--- .../trustee/accounting/accountingRegistry.py | 2 +- .../connectors/accountingConnectorAbacus.py | 183 +++++++++++- .../connectors/accountingConnectorBexio.py | 175 +++++++++++- .../connectors/accountingConnectorRma.py | 254 +++++++++++++++++ .../trustee/interfaceFeatureTrustee.py | 31 ++- modules/features/trustee/mainTrustee.py | 10 +- .../features/trustee/routeFeatureTrustee.py | 194 +++++++------ .../workspace/routeFeatureWorkspace.py | 8 +- modules/interfaces/interfaceBootstrap.py | 8 +- modules/interfaces/interfaceDbApp.py | 16 +- modules/interfaces/interfaceDbBilling.py | 84 +++--- modules/interfaces/interfaceDbChat.py | 8 +- modules/interfaces/interfaceDbKnowledge.py | 8 +- modules/interfaces/interfaceDbManagement.py | 13 +- modules/interfaces/interfaceDbSubscription.py | 6 +- modules/interfaces/interfaceRbac.py | 64 +++-- modules/routes/routeAdminDemoConfig.py | 12 +- modules/routes/routeAdminFeatures.py | 8 +- modules/routes/routeAudit.py | 70 +++-- modules/routes/routeBilling.py | 174 ++++++------ modules/routes/routeDataConnections.py | 61 +--- modules/routes/routeDataFiles.py | 36 ++- modules/routes/routeDataMandates.py | 28 +- modules/routes/routeDataPrompts.py | 31 ++- modules/routes/routeDataUsers.py | 73 +++-- modules/routes/routeHelpers.py | 196 ++++++++++--- modules/routes/routeI18n.py | 14 +- modules/routes/routeInvitations.py | 33 +-- modules/routes/routeNotifications.py | 6 +- modules/routes/routeSecurityLocal.py | 14 +- modules/routes/routeStore.py | 4 +- modules/routes/routeSubscription.py | 69 +++-- modules/routes/routeSystem.py | 4 +- modules/routes/routeVoiceUser.py | 4 +- modules/routes/routeWorkflowDashboard.py | 260 ++++++++++-------- .../services/serviceAgent/agentLoop.py | 2 +- .../services/serviceAgent/mainServiceAgent.py | 4 +- .../services/serviceExtraction/subPipeline.py | 4 +- .../services/serviceExtraction/subRegistry.py | 6 +- .../serviceKnowledge/mainServiceKnowledge.py | 4 +- .../mainServiceSubscription.py | 16 +- modules/shared/aiAuditLogger.py | 4 +- modules/shared/attributeUtils.py | 48 ++-- modules/shared/dbRegistry.py | 2 +- modules/shared/debugLogger.py | 12 +- modules/shared/fkRegistry.py | 20 +- modules/shared/i18nRegistry.py | 20 +- modules/shared/notifyMandateAdmins.py | 8 +- modules/shared/timeUtils.py | 2 +- modules/system/databaseHealth.py | 14 +- modules/system/mainSystem.py | 2 +- .../workflows/automation2/executionEngine.py | 12 +- .../executors/actionNodeExecutor.py | 6 +- .../automation2/executors/dataExecutor.py | 10 +- .../automation2/executors/flowExecutor.py | 16 +- modules/workflows/automation2/graphUtils.py | 8 +- modules/workflows/scheduler/mainScheduler.py | 2 +- tests/demo/conftest.py | 4 +- tests/demo/test_demo_api.py | 16 +- tests/demo/test_pwg_demo_bootstrap.py | 4 +- tests/test_service_redmine_stats_cache.py | 6 +- tests/unit/datamodels/test_udm_bridge.py | 6 +- .../test_trustee_template_workflows.py | 59 ++++ ...test_accountingConnectorAbacus_balances.py | 94 +++++++ .../test_accountingConnectorBexio_balances.py | 114 ++++++++ .../test_accountingConnectorRma_balances.py | 156 +++++++++++ .../test_accountingDataSync_balances.py | 196 +++++++++++++ ...est_featureInstanceRef_node_definitions.py | 95 +++++++ .../unit/graphicalEditor/test_node_adapter.py | 6 +- .../test_route_options_feature_instance.py | 66 +++++ 104 files changed, 2983 insertions(+), 1055 deletions(-) create mode 100644 tests/unit/features/test_trustee_template_workflows.py create mode 100644 tests/unit/features/trustee/test_accountingConnectorAbacus_balances.py create mode 100644 tests/unit/features/trustee/test_accountingConnectorBexio_balances.py create mode 100644 tests/unit/features/trustee/test_accountingConnectorRma_balances.py create mode 100644 tests/unit/features/trustee/test_accountingDataSync_balances.py create mode 100644 tests/unit/graphicalEditor/test_featureInstanceRef_node_definitions.py create mode 100644 tests/unit/graphicalEditor/test_route_options_feature_instance.py diff --git a/app.py b/app.py index 8e3552b5..d4d0ba99 100644 --- a/app.py +++ b/app.py @@ -327,9 +327,9 @@ async def lifespan(app: FastAPI): # Sync gateway i18n registry to DB and load translation cache try: - from modules.shared.i18nRegistry import _syncRegistryToDb, _loadCache - await _syncRegistryToDb() - await _loadCache() + from modules.shared.i18nRegistry import syncRegistryToDb, loadCache + await syncRegistryToDb() + await loadCache() logger.info("i18n registry sync + cache load completed") except Exception as e: logger.warning(f"i18n registry sync failed (non-critical): {e}") @@ -522,15 +522,15 @@ from modules.auth import ( # Per-request context middleware: language (Accept-Language) + user timezone (X-User-Timezone). # Both are written into ContextVars and consumed by t() / resolveText() and getRequestNow() # without having to thread them through every call site. -from modules.shared.i18nRegistry import _setLanguage, normalizePrimaryLanguageTag -from modules.shared.timeUtils import _setRequestTimezone +from modules.shared.i18nRegistry import setLanguage, normalizePrimaryLanguageTag +from modules.shared.timeUtils import setRequestTimezone @app.middleware("http") async def _requestContextMiddleware(request: Request, call_next): acceptLang = request.headers.get("Accept-Language", "") lang = normalizePrimaryLanguageTag(acceptLang, "de") - _setLanguage(lang) - _setRequestTimezone(request.headers.get("X-User-Timezone", "")) + setLanguage(lang) + setRequestTimezone(request.headers.get("X-User-Timezone", "")) return await call_next(request) app.add_middleware(CSRFMiddleware) diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 72098ff1..f2e7758e 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -76,7 +76,7 @@ def _isJsonbType(fieldType) -> bool: return False -def _get_model_fields(model_class) -> Dict[str, str]: +def getModelFields(model_class) -> Dict[str, str]: """Get all fields from Pydantic model and map to SQL types. Supports explicit db_type override via json_schema_extra={"db_type": "vector(1536)"}. @@ -122,21 +122,27 @@ def _get_model_fields(model_class) -> Dict[str, str]: def _get_fk_sort_meta(model_class) -> Dict[str, Dict[str, str]]: - """Map FK field name -> {model, labelField} from json_schema_extra (fk_model + frontend_fk_display_field).""" + """Map FK field name -> {model, labelField} from json_schema_extra (``fk_model`` + ``fk_label_field``). + + ``fk_model`` may be omitted if ``fk_target.table`` is set (table name = resolver / JOIN key). + """ result: Dict[str, Dict[str, str]] = {} for name, field_info in model_class.model_fields.items(): extra = field_info.json_schema_extra if not extra or not isinstance(extra, dict): continue fk_model = extra.get("fk_model") - label_field = extra.get("frontend_fk_display_field") + tgt = extra.get("fk_target") + if not fk_model and isinstance(tgt, dict) and tgt.get("table"): + fk_model = tgt["table"] + label_field = extra.get("fk_label_field") if fk_model and label_field: result[name] = {"model": str(fk_model), "labelField": str(label_field)} return result -def _parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: str = "") -> None: +def parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: str = "") -> None: """Parse record fields in-place: numeric typing, vector parsing, JSONB deserialization.""" import json as _json @@ -189,7 +195,7 @@ _current_user_id: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar ) -def _get_cached_connector( +def getCachedConnector( dbHost: str, dbDatabase: str, dbUser: str = None, @@ -553,7 +559,7 @@ class DatabaseConnector: } # Desired columns based on model - model_fields = _get_model_fields(model_class) + model_fields = getModelFields(model_class) desired_columns = set(["id"]) | set(model_fields.keys()) # Add missing columns @@ -633,7 +639,7 @@ class DatabaseConnector: def _create_table_from_model(self, cursor, table: str, model_class: type) -> None: """Create table with columns matching Pydantic model fields.""" - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) # Enable pgvector if any field uses vector type if any(_isVectorType(sqlType) for sqlType in fields.values()): @@ -666,7 +672,7 @@ class DatabaseConnector: ) -> None: """Save record to normalized table with explicit columns.""" # Get columns from Pydantic model instead of database schema - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) columns = ["id"] + [field for field in fields.keys() if field != "id"] if not columns: @@ -751,9 +757,9 @@ class DatabaseConnector: # Convert row to dict and handle JSONB fields record = dict(row) - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) - _parseRecordFields(record, fields, f"record {recordId}") + parseRecordFields(record, fields, f"record {recordId}") return record except Exception as e: @@ -822,10 +828,10 @@ class DatabaseConnector: cursor.execute(f'SELECT * FROM "{table}" ORDER BY "id"') records = [dict(row) for row in cursor.fetchall()] - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) modelFields = model_class.model_fields for record in records: - _parseRecordFields(record, fields, f"table {table}") + parseRecordFields(record, fields, f"table {table}") # Set type-aware defaults for NULL JSONB fields for fieldName, fieldType in fields.items(): if fieldType == "JSONB" and fieldName in record and record[fieldName] is None: @@ -1011,10 +1017,10 @@ class DatabaseConnector: cursor.execute(query, where_values) records = [dict(row) for row in cursor.fetchall()] - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) modelFields = model_class.model_fields for record in records: - _parseRecordFields(record, fields, f"table {table}") + parseRecordFields(record, fields, f"table {table}") for fieldName, fieldType in fields.items(): if fieldType == "JSONB" and fieldName in record and record[fieldName] is None: fieldInfo = modelFields.get(fieldName) @@ -1055,7 +1061,7 @@ class DatabaseConnector: Translate PaginationParams + recordFilter into SQL clauses. Returns (where_clause, order_clause, limit_clause, values, count_values). """ - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) validColumns = set(fields.keys()) where_parts: List[str] = [] @@ -1214,10 +1220,10 @@ class DatabaseConnector: cursor.execute(dataSql, values) records = [dict(row) for row in cursor.fetchall()] - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) modelFields = model_class.model_fields for record in records: - _parseRecordFields(record, fields, f"table {table}") + parseRecordFields(record, fields, f"table {table}") for fieldName, fieldType in fields.items(): if fieldType == "JSONB" and fieldName in record and record[fieldName] is None: fieldInfo = modelFields.get(fieldName) @@ -1235,10 +1241,13 @@ class DatabaseConnector: if fieldFilter and isinstance(fieldFilter, list): records = [{f: r[f] for f in fieldFilter if f in r} for r in records] - pageSize = pagination.pageSize if pagination else max(totalItems, 1) - totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 + from modules.routes.routeHelpers import enrichRowsWithFkLabels + enrichRowsWithFkLabels(records, model_class) - return {"items": records, "totalItems": totalItems, "totalPages": totalPages} + pageSize = pagination.pageSize if pagination else max(totalItems, 1) + totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 + + return {"items": records, "totalItems": totalItems, "totalPages": totalPages} except Exception as e: logger.error(f"Error in getRecordsetPaginated for table {table}: {e}") return {"items": [], "totalItems": 0, "totalPages": 0} @@ -1249,13 +1258,18 @@ class DatabaseConnector: column: str, pagination=None, recordFilter: Dict[str, Any] = None, - ) -> List[str]: - """ - Returns sorted distinct non-null values for a column using SQL DISTINCT. + includeEmpty: bool = True, + ) -> List[Optional[str]]: + """Return sorted distinct values for a column using SQL DISTINCT. + + When ``includeEmpty`` is True (default), NULL and empty-string rows are + represented as a single ``None`` entry at the end of the list — this + allows the frontend to offer a "(Leer)" filter option. + Applies cross-filtering (all filters except the requested column). """ table = model_class.__name__ - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) if column not in fields: return [] @@ -1274,18 +1288,28 @@ class DatabaseConnector: where_clause, _, _, values, _ = \ self._buildPaginationClauses(model_class, pagination, recordFilter) - sql = ( - f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} ' - f'WHERE "{column}" IS NOT NULL AND "{column}"::TEXT != \'\' ' - if not where_clause else - f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} ' - f'AND "{column}" IS NOT NULL AND "{column}"::TEXT != \'\' ' - ) - sql += 'ORDER BY val' + nonNullCond = f'"{column}" IS NOT NULL AND "{column}"::TEXT != \'\'' + if where_clause: + sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{where_clause} AND {nonNullCond} ORDER BY val' + else: + sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}" WHERE {nonNullCond} ORDER BY val' with self.connection.cursor() as cursor: cursor.execute(sql, values) - return [row["val"] for row in cursor.fetchall()] + result: List[Optional[str]] = [row["val"] for row in cursor.fetchall()] + + if includeEmpty: + emptyCond = f'"{column}" IS NULL OR "{column}"::TEXT = \'\'' + if where_clause: + emptySql = f'SELECT 1 FROM "{table}"{where_clause} AND ({emptyCond}) LIMIT 1' + else: + emptySql = f'SELECT 1 FROM "{table}" WHERE ({emptyCond}) LIMIT 1' + with self.connection.cursor() as cursor: + cursor.execute(emptySql, values) + if cursor.fetchone(): + result.append(None) + + return result except Exception as e: logger.error(f"Error in getDistinctColumnValues for {table}.{column}: {e}") return [] @@ -1419,7 +1443,7 @@ class DatabaseConnector: if not self._ensureTableExists(model_class): raise ValueError(f"Table {table} does not exist") - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) columns = ["id"] + [f for f in fields.keys() if f != "id"] modelFields = model_class.model_fields @@ -1541,7 +1565,7 @@ class DatabaseConnector: if not self._ensureTableExists(model_class): return 0 - fields = _get_model_fields(model_class) + fields = getModelFields(model_class) clauses: List[str] = [] params: List[Any] = [] for key, val in recordFilter.items(): @@ -1659,9 +1683,9 @@ class DatabaseConnector: cursor.execute(query, params) records = [dict(row) for row in cursor.fetchall()] - fields = _get_model_fields(modelClass) + fields = getModelFields(modelClass) for record in records: - _parseRecordFields(record, fields, f"semanticSearch {table}") + parseRecordFields(record, fields, f"semanticSearch {table}") return records except Exception as e: diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py index 353f780b..2a65bcdc 100644 --- a/modules/datamodels/datamodelBase.py +++ b/modules/datamodels/datamodelBase.py @@ -8,12 +8,12 @@ from pydantic import BaseModel, Field from modules.shared.i18nRegistry import i18nModel -_MODEL_REGISTRY: Dict[str, Type["PowerOnModel"]] = {} +MODEL_REGISTRY: Dict[str, Type["PowerOnModel"]] = {} def _getModelByTableName(tableName: str) -> Optional[Type["PowerOnModel"]]: """Look up a PowerOnModel subclass by its table name (= class name).""" - return _MODEL_REGISTRY.get(tableName) + return MODEL_REGISTRY.get(tableName) @i18nModel("Basisdatensatz") @@ -22,7 +22,7 @@ class PowerOnModel(BaseModel): def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) - _MODEL_REGISTRY[cls.__name__] = cls + MODEL_REGISTRY[cls.__name__] = cls sysCreatedAt: Optional[float] = Field( default=None, @@ -46,6 +46,9 @@ class PowerOnModel(BaseModel): "frontend_required": False, "frontend_visible": False, "system": True, + "fk_model": "User", + "fk_label_field": "username", + "fk_target": {"db": "poweron_app", "table": "User"}, }, ) sysModifiedAt: Optional[float] = Field( @@ -70,5 +73,8 @@ class PowerOnModel(BaseModel): "frontend_required": False, "frontend_visible": False, "system": True, + "fk_model": "User", + "fk_label_field": "username", + "fk_target": {"db": "poweron_app", "table": "User"}, }, ) diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py index c8b0c865..d9b78ddf 100644 --- a/modules/datamodels/datamodelFiles.py +++ b/modules/datamodels/datamodelFiles.py @@ -30,9 +30,8 @@ class FileItem(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "frontend_fk_source": "/api/mandates/", - "frontend_fk_display_field": "label", "fk_model": "Mandate", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, ) @@ -44,9 +43,8 @@ class FileItem(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "frontend_fk_source": "/api/features/instances", - "frontend_fk_display_field": "label", "fk_model": "FeatureInstance", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, }, ) diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py index f70fe035..5c7280d0 100644 --- a/modules/datamodels/datamodelMembership.py +++ b/modules/datamodels/datamodelMembership.py @@ -31,9 +31,8 @@ class UserMandate(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/users/", - "frontend_fk_display_field": "username", "fk_model": "User", + "fk_label_field": "username", "fk_target": {"db": "poweron_app", "table": "User"}, }, ) @@ -44,9 +43,8 @@ class UserMandate(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/mandates/", - "frontend_fk_display_field": "label", "fk_model": "Mandate", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, ) @@ -75,8 +73,8 @@ class FeatureAccess(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/users/", - "frontend_fk_display_field": "username", + "fk_model": "User", + "fk_label_field": "username", "fk_target": {"db": "poweron_app", "table": "User"}, }, ) @@ -87,8 +85,8 @@ class FeatureAccess(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/features/instances", - "frontend_fk_display_field": "label", + "fk_model": "FeatureInstance", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, }, ) @@ -127,8 +125,8 @@ class UserMandateRole(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/rbac/roles", - "frontend_fk_display_field": "roleLabel", + "fk_model": "Role", + "fk_label_field": "roleLabel", "fk_target": {"db": "poweron_app", "table": "Role"}, }, ) @@ -162,8 +160,8 @@ class FeatureAccessRole(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "frontend_fk_source": "/api/rbac/roles", - "frontend_fk_display_field": "roleLabel", + "fk_model": "Role", + "fk_label_field": "roleLabel", "fk_target": {"db": "poweron_app", "table": "Role"}, }, ) diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py index 1214a96f..45aa76a7 100644 --- a/modules/datamodels/datamodelRbac.py +++ b/modules/datamodels/datamodelRbac.py @@ -63,8 +63,8 @@ class Role(PowerOnModel): "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, - "frontend_fk_source": "/api/mandates/", - "frontend_fk_display_field": "label", + "fk_model": "Mandate", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, ) @@ -77,8 +77,8 @@ class Role(PowerOnModel): "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, - "frontend_fk_source": "/api/features/instances", - "frontend_fk_display_field": "label", + "fk_model": "FeatureInstance", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, }, ) @@ -115,8 +115,8 @@ class AccessRule(PowerOnModel): "frontend_type": "select", "frontend_readonly": True, "frontend_required": True, - "frontend_fk_source": "/api/rbac/roles", - "frontend_fk_display_field": "roleLabel", + "fk_model": "Role", + "fk_label_field": "roleLabel", "fk_target": {"db": "poweron_app", "table": "Role"}, }, ) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 058f2e17..46ce1f31 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -407,7 +407,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { } -def _getPlan(planKey: str) -> Optional[SubscriptionPlan]: +def getPlan(planKey: str) -> Optional[SubscriptionPlan]: """Resolve a plan by key from the built-in catalog.""" return BUILTIN_PLANS.get(planKey) diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 90dd9452..5cfb4c37 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -397,6 +397,8 @@ class UserConnection(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Benutzer-ID", + "fk_model": "User", + "fk_label_field": "username", "fk_target": {"db": "poweron_app", "table": "User"}, }, ) @@ -650,7 +652,7 @@ class UserInDB(User): ) -def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]: +def normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]: """ Coerce ttsVoiceMap payloads to Dict[str, str]. @@ -728,6 +730,6 @@ class UserVoicePreferences(PowerOnModel): @field_validator("ttsVoiceMap", mode="before") @classmethod def _validateTtsVoiceMap(cls, value: Any) -> Optional[Dict[str, str]]: - return _normalizeTtsVoiceMap(value) + return normalizeTtsVoiceMap(value) diff --git a/modules/datamodels/datamodelUdm.py b/modules/datamodels/datamodelUdm.py index 330467b4..794b71f0 100644 --- a/modules/datamodels/datamodelUdm.py +++ b/modules/datamodels/datamodelUdm.py @@ -177,7 +177,7 @@ def _groupKeyForPart(part: ContentPart) -> Tuple[str, int, str]: _VALID_DOC_SOURCES = frozenset({"pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"}) -def _contentPartsToUdm(extracted: ContentExtracted, sourceType: str, sourcePath: str) -> UdmDocument: +def contentPartsToUdm(extracted: ContentExtracted, sourceType: str, sourcePath: str) -> UdmDocument: """Convert flat ContentPart list into a UdmDocument using structural heuristics.""" parts = list(extracted.parts or []) st: Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"] = ( @@ -290,7 +290,7 @@ def _stripUdmForReferences(udm: UdmDocument) -> UdmDocument: return clone -def _applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument: +def applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument: if detail == "structure": return _stripUdmRaw(udm) if detail == "references": @@ -298,7 +298,7 @@ def _applyUdmOutputDetail(udm: UdmDocument, detail: str) -> UdmDocument: return udm -def _mimeToUdmSourceType(mimeType: str, fileName: str) -> Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"]: +def mimeToUdmSourceType(mimeType: str, fileName: str) -> Literal["pdf", "docx", "pptx", "xlsx", "html", "binary", "unknown"]: m = (mimeType or "").lower() fn = (fileName or "").lower() if m == "application/pdf" or fn.endswith(".pdf"): diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py index f389d0d7..0c1bb8c6 100644 --- a/modules/datamodels/datamodelUtils.py +++ b/modules/datamodels/datamodelUtils.py @@ -27,6 +27,8 @@ class Prompt(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, + "fk_model": "Mandate", + "fk_label_field": "label", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, ) diff --git a/modules/demoConfigs/__init__.py b/modules/demoConfigs/__init__.py index 9e64cf96..5395f71b 100644 --- a/modules/demoConfigs/__init__.py +++ b/modules/demoConfigs/__init__.py @@ -2,7 +2,7 @@ Demo Configs — Auto-Discovery Module Scans this folder for Python files that contain subclasses of _BaseDemoConfig -and exposes them via _getAvailableDemoConfigs(). +and exposes them via getAvailableDemoConfigs(). """ import importlib @@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) _configCache: Dict[str, _BaseDemoConfig] = {} -def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]: +def getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]: """Return a dict of code -> instance for every discovered demo config.""" if _configCache: return _configCache @@ -43,7 +43,7 @@ def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]: return _configCache -def _getDemoConfigByCode(code: str) -> _BaseDemoConfig | None: +def getDemoConfigByCode(code: str) -> _BaseDemoConfig | None: """Get a specific demo config by its code.""" - configs = _getAvailableDemoConfigs() + configs = getAvailableDemoConfigs() return configs.get(code) diff --git a/modules/demoConfigs/investorDemo2026.py b/modules/demoConfigs/investorDemo2026.py index 81956c6d..f8fc678f 100644 --- a/modules/demoConfigs/investorDemo2026.py +++ b/modules/demoConfigs/investorDemo2026.py @@ -447,10 +447,10 @@ class InvestorDemo2026(_BaseDemoConfig): if not mandateId: return try: - from modules.interfaces.interfaceDbBilling import _getRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface from modules.datamodels.datamodelBilling import BillingSettings - billingInterface = _getRootInterface() + billingInterface = getRootInterface() existingSettings = billingInterface.getSettings(mandateId) if existingSettings: summary["skipped"].append(f"Billing for {mandateLabel} exists") @@ -532,8 +532,8 @@ class InvestorDemo2026(_BaseDemoConfig): summary["removed"].append(f"{len(roles)} roles in {mandateLabel}") try: - from modules.interfaces.interfaceDbBilling import _getRootInterface - billingDb = _getRootInterface().db + from modules.interfaces.interfaceDbBilling import getRootInterface + billingDb = getRootInterface().db billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) or [] for bs in billingSettings: billingDb.recordDelete(BillingSettings, bs.get("id")) diff --git a/modules/demoConfigs/pwgDemo2026.py b/modules/demoConfigs/pwgDemo2026.py index d4661bcf..f80760f9 100644 --- a/modules/demoConfigs/pwgDemo2026.py +++ b/modules/demoConfigs/pwgDemo2026.py @@ -377,9 +377,9 @@ class PwgDemo2026(_BaseDemoConfig): return try: from modules.datamodels.datamodelBilling import BillingSettings - from modules.interfaces.interfaceDbBilling import _getRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface - billingInterface = _getRootInterface() + billingInterface = getRootInterface() existingSettings = billingInterface.getSettings(mandateId) if existingSettings: summary["skipped"].append(f"Billing for {mandateLabel} exists") @@ -708,8 +708,8 @@ class PwgDemo2026(_BaseDemoConfig): db.recordDelete(Role, role.get("id")) try: - from modules.interfaces.interfaceDbBilling import _getRootInterface - billingDb = _getRootInterface().db + from modules.interfaces.interfaceDbBilling import getRootInterface + billingDb = getRootInterface().db billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) or [] for bs in billingSettings: billingDb.recordDelete(BillingSettings, bs.get("id")) diff --git a/modules/features/chatbot/interfaceFeatureChatbot.py b/modules/features/chatbot/interfaceFeatureChatbot.py index 28f6000c..68d672a4 100644 --- a/modules/features/chatbot/interfaceFeatureChatbot.py +++ b/modules/features/chatbot/interfaceFeatureChatbot.py @@ -139,13 +139,13 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI try: import os from datetime import datetime, UTC - from modules.shared.debugLogger import _getBaseDebugDir, _ensureDir + from modules.shared.debugLogger import getBaseDebugDir, ensureDir from modules.interfaces.interfaceDbManagement import getInterface # Create base debug directory (use base debug dir, not prompts subdirectory) - baseDebugDir = _getBaseDebugDir() + baseDebugDir = getBaseDebugDir() debug_root = os.path.join(baseDebugDir, 'messages') - _ensureDir(debug_root) + ensureDir(debug_root) # Generate timestamp timestamp = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3] @@ -210,7 +210,7 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI safe_label = "default" label_folder = os.path.join(message_path, safe_label) - _ensureDir(label_folder) + ensureDir(label_folder) # Store each document for i, doc in enumerate(docs): @@ -401,8 +401,8 @@ class ChatObjects: dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) - from modules.connectors.connectorDbPostgre import _get_cached_connector - self.db = _get_cached_connector( + from modules.connectors.connectorDbPostgre import getCachedConnector + self.db = getCachedConnector( dbHost=dbHost, dbDatabase=dbDatabase, dbUser=dbUser, diff --git a/modules/features/chatbot/routeFeatureChatbot.py b/modules/features/chatbot/routeFeatureChatbot.py index 06cf985d..4ee82fc5 100644 --- a/modules/features/chatbot/routeFeatureChatbot.py +++ b/modules/features/chatbot/routeFeatureChatbot.py @@ -204,19 +204,20 @@ def get_chatbot_threads( normalized_wf["maxSteps"] = 10 normalized_workflows.append(normalized_wf) - metadata = PaginationMetadata( - currentPage=paginationParams.page if paginationParams else 1, - pageSize=paginationParams.pageSize if paginationParams else len(workflows), - totalItems=totalItems, - totalPages=totalPages, - sort=paginationParams.sort if paginationParams else [], - filters=paginationParams.filters if paginationParams else None - ) - - return PaginatedResponse( - items=normalized_workflows, - pagination=metadata - ) + from modules.routes.routeHelpers import enrichRowsWithFkLabels + enriched = enrichRowsWithFkLabels(normalized_workflows, ChatbotConversation) + + return { + "items": enriched, + "pagination": PaginationMetadata( + currentPage=paginationParams.page if paginationParams else 1, + pageSize=paginationParams.pageSize if paginationParams else len(workflows), + totalItems=totalItems, + totalPages=totalPages, + sort=paginationParams.sort if paginationParams else [], + filters=paginationParams.filters if paginationParams else None + ).model_dump(), + } except HTTPException: raise diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index 99ae798e..bb83c13c 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -336,10 +336,10 @@ async def startSession( try: from modules.interfaces.interfaceVoiceObjects import getVoiceInterface voiceInterface = getVoiceInterface(context.user, mandateId) - from .serviceCommcoach import _getUserVoicePrefs, _stripMarkdownForTts, _buildTtsConfigErrorMessage - language, voiceName = _getUserVoicePrefs(userId, mandateId) + from .serviceCommcoach import getUserVoicePrefs, stripMarkdownForTts, buildTtsConfigErrorMessage + language, voiceName = getUserVoicePrefs(userId, mandateId) ttsResult = await voiceInterface.textToSpeech( - text=_stripMarkdownForTts(greetingText), + text=stripMarkdownForTts(greetingText), languageCode=language, voiceName=voiceName, ) @@ -584,8 +584,8 @@ async def sendAudioStream( if not audioBody: raise HTTPException(status_code=400, detail=routeApiMsg("No audio data received")) - from .serviceCommcoach import _getUserVoicePrefs - language, _ = _getUserVoicePrefs(str(context.user.id), mandateId) + from .serviceCommcoach import getUserVoicePrefs + language, _ = getUserVoicePrefs(str(context.user.id), mandateId) contextId = session.get("contextId") service = CommcoachService(context.user, mandateId, instanceId) diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index 332a4a01..8765e30c 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ b/modules/features/commcoach/serviceCommcoach.py @@ -79,7 +79,7 @@ def _selectConfiguredVoice( return None -def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str: +def buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str: if voiceName: return ( f'Die konfigurierte Stimme "{voiceName}" für {language} ist ungültig oder nicht verfügbar. ' @@ -91,7 +91,7 @@ def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawErro ) -def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: +def getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: """Load voice language and voiceName from central UserVoicePreferences. Returns (language, voiceName) tuple.""" try: @@ -160,7 +160,7 @@ def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: return ("de-DE", None) -def _stripMarkdownForTts(text: str) -> str: +def stripMarkdownForTts(text: str) -> str: """Strip markdown formatting so TTS reads clean speech text.""" t = text t = re.sub(r'\*\*(.+?)\*\*', r'\1', t) @@ -346,9 +346,9 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand from modules.interfaces.interfaceVoiceObjects import getVoiceInterface import base64 voiceInterface = getVoiceInterface(currentUser, mandateId) - language, voiceName = _getUserVoicePrefs(str(currentUser.id), mandateId) + language, voiceName = getUserVoicePrefs(str(currentUser.id), mandateId) ttsResult = await voiceInterface.textToSpeech( - text=_stripMarkdownForTts(speechText), + text=stripMarkdownForTts(speechText), languageCode=language, voiceName=voiceName, ) @@ -362,7 +362,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand return errorDetail = ttsResult.get("error", "Text-to-Speech failed") await emitSessionEvent(sessionId, "error", { - "message": _buildTtsConfigErrorMessage(language, voiceName, errorDetail), + "message": buildTtsConfigErrorMessage(language, voiceName, errorDetail), "detail": errorDetail, "ttsLanguage": language, "ttsVoice": voiceName, @@ -370,7 +370,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand except Exception as e: logger.warning(f"TTS failed for session {sessionId}: {e}") await emitSessionEvent(sessionId, "error", { - "message": _buildTtsConfigErrorMessage("de-DE", None, str(e)), + "message": buildTtsConfigErrorMessage("de-DE", None, str(e)), "detail": str(e), }) @@ -695,7 +695,7 @@ _TTS_WORD_LIMIT = 200 async def _prepareSpeechText(fullText: str, callAiFn) -> str: """Prepare text for TTS. Short responses used directly; long ones get summarized.""" - cleaned = _stripMarkdownForTts(fullText) + cleaned = stripMarkdownForTts(fullText) wordCount = len(cleaned.split()) if wordCount <= _TTS_WORD_LIMIT: return cleaned @@ -1373,7 +1373,7 @@ class CommcoachService: from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface from modules.interfaces.interfaceDbApp import getRootInterface - from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName + from modules.shared.notifyMandateAdmins import renderHtmlEmail, resolveMandateName rootInterface = getRootInterface() user = rootInterface.getUser(self.userId) @@ -1382,9 +1382,9 @@ class CommcoachService: messaging = getMessagingInterface() subject = f"Coaching-Session Zusammenfassung: {contextTitle}" - mandateName = _resolveMandateName(self.mandateId) + mandateName = resolveMandateName(self.mandateId) contentHtml = _buildSummaryEmailBlock(emailData, summary, contextTitle) - htmlMessage = _renderHtmlEmail( + htmlMessage = renderHtmlEmail( "Coaching-Session Zusammenfassung", [ f'Thema: {contextTitle}', diff --git a/modules/features/commcoach/serviceCommcoachScheduler.py b/modules/features/commcoach/serviceCommcoachScheduler.py index dcbc1e86..00bc3b1e 100644 --- a/modules/features/commcoach/serviceCommcoachScheduler.py +++ b/modules/features/commcoach/serviceCommcoachScheduler.py @@ -64,7 +64,7 @@ async def _runDailyReminders(): from modules.connectors.connectorDbPostgre import DatabaseConnector from .datamodelCommcoach import CoachingUserProfile, CoachingContextStatus from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface - from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName + from modules.shared.notifyMandateAdmins import renderHtmlEmail, resolveMandateName dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data") db = DatabaseConnector( @@ -106,8 +106,8 @@ async def _runDailyReminders(): contextList = ", ".join(contextTitles) subject = "Dein tägliches Coaching wartet" - mandateName = _resolveMandateName(profile.get("mandateId")) - htmlMessage = _renderHtmlEmail( + mandateName = resolveMandateName(profile.get("mandateId")) + htmlMessage = renderHtmlEmail( "Zeit für dein tägliches Coaching", [ f"Du hast aktuell {len(contexts)} aktive Coaching-Themen.", diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py index b86c295a..63572649 100644 --- a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py @@ -68,8 +68,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "frontend_fk_source": "/api/mandates/", - "frontend_fk_display_field": "label", + "fk_label_field": "label", "fk_model": "Mandate", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, @@ -81,8 +80,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "frontend_fk_source": "/api/features/instances", - "frontend_fk_display_field": "label", + "fk_label_field": "label", "fk_model": "FeatureInstance", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, }, @@ -220,6 +218,8 @@ class AutoVersion(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht von", + "fk_model": "User", + "fk_label_field": "username", "fk_target": {"db": "poweron_app", "table": "User"}, }, ) @@ -259,8 +259,7 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "frontend_fk_source": "/api/mandates/", - "frontend_fk_display_field": "label", + "fk_label_field": "label", "fk_model": "Mandate", "fk_target": {"db": "poweron_app", "table": "Mandate"}, }, @@ -273,6 +272,8 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Auslöser", + "fk_model": "User", + "fk_label_field": "username", "fk_target": {"db": "poweron_app", "table": "User"}, }, ) diff --git a/modules/features/graphicalEditor/nodeAdapter.py b/modules/features/graphicalEditor/nodeAdapter.py index ed7ec711..f0cd1469 100644 --- a/modules/features/graphicalEditor/nodeAdapter.py +++ b/modules/features/graphicalEditor/nodeAdapter.py @@ -73,7 +73,7 @@ def _isMethodBoundNode(node: Mapping[str, Any]) -> bool: return bool(node.get("_method") and node.get("_action")) -def _bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]: +def bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]: """Build the canonical 'method.action' identifier from a legacy node dict. Returns None for framework-primitive nodes (trigger/flow/input/data). @@ -121,7 +121,7 @@ def _adapterFromLegacyNode(node: Mapping[str, Any]) -> Optional[NodeAdapter]: if not _isMethodBoundNode(node): return None - bindsAction = _bindsActionFromLegacy(node) + bindsAction = bindsActionFromLegacy(node) if not bindsAction: return None diff --git a/modules/features/graphicalEditor/nodeDefinitions/redmine.py b/modules/features/graphicalEditor/nodeDefinitions/redmine.py index 55a6e7c7..d9ea8bab 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/redmine.py +++ b/modules/features/graphicalEditor/nodeDefinitions/redmine.py @@ -4,6 +4,19 @@ from modules.shared.i18nRegistry import t +# Typed FeatureInstance binding (replaces legacy `string, hidden`). +# - type FeatureInstanceRef[redmine] is filtered by the DataPicker. +# - frontendType "featureInstance" is rendered by FeatureInstancePicker which +# loads /options/feature.instance?featureCode=redmine for the current mandate. +_REDMINE_INSTANCE_PARAM = { + "name": "featureInstanceId", + "type": "FeatureInstanceRef[redmine]", + "required": True, + "frontendType": "featureInstance", + "frontendOptions": {"featureCode": "redmine"}, + "description": t("Redmine-Mandant"), +} + REDMINE_NODES = [ { "id": "redmine.readTicket", @@ -11,8 +24,7 @@ REDMINE_NODES = [ "label": t("Ticket lesen"), "description": t("Einzelnes Redmine-Ticket aus dem Mirror laden."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "ticketId", "type": "number", "required": True, "frontendType": "number", "description": t("Redmine-Ticket-ID")}, ], @@ -30,8 +42,7 @@ REDMINE_NODES = [ "label": t("Tickets auflisten"), "description": t("Tickets aus dem lokalen Mirror mit Filtern (Tracker, Status, Zeitraum, Zuweisung)."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "trackerIds", "type": "string", "required": False, "frontendType": "text", "description": t("Tracker-IDs (Komma-separiert)"), "default": ""}, {"name": "status", "type": "string", "required": False, "frontendType": "text", @@ -59,8 +70,7 @@ REDMINE_NODES = [ "label": t("Ticket erstellen"), "description": t("Neues Ticket in Redmine anlegen. Mirror wird sofort aktualisiert."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "subject", "type": "string", "required": True, "frontendType": "text", "description": t("Ticket-Titel")}, {"name": "trackerId", "type": "number", "required": True, "frontendType": "number", @@ -92,8 +102,7 @@ REDMINE_NODES = [ "label": t("Ticket bearbeiten"), "description": t("Felder eines Redmine-Tickets aktualisieren. Nur gesetzte Felder werden uebertragen."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "ticketId", "type": "number", "required": True, "frontendType": "number", "description": t("Ticket-ID")}, {"name": "subject", "type": "string", "required": False, "frontendType": "text", @@ -129,8 +138,7 @@ REDMINE_NODES = [ "label": t("Statistik laden"), "description": t("Aggregierte Kennzahlen (KPIs, Durchsatz, Status-Verteilung, Backlog) aus dem Mirror."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", "description": t("Zeitraum ab")}, {"name": "dateTo", "type": "string", "required": False, "frontendType": "date", @@ -154,8 +162,7 @@ REDMINE_NODES = [ "label": t("Mirror synchronisieren"), "description": t("Tickets und Beziehungen aus Redmine in den lokalen Mirror uebernehmen."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Redmine Feature-Instanz-ID")}, + dict(_REDMINE_INSTANCE_PARAM), {"name": "force", "type": "boolean", "required": False, "frontendType": "checkbox", "description": t("Vollsync erzwingen (ignoriert lastSyncAt)"), "default": False}, ], diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py index 5f7de2b2..0a8e7cd7 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/trustee.py +++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py @@ -3,6 +3,20 @@ from modules.shared.i18nRegistry import t +# Typed FeatureInstance binding (replaces legacy `string, hidden`). +# - type uses the discriminator notation `FeatureInstanceRef[]` so the +# DataPicker / RequiredAttributePicker can filter compatible upstream paths. +# - frontendType "featureInstance" is rendered by FeatureInstancePicker which +# loads /options/feature.instance?featureCode=trustee for the current mandate. +_TRUSTEE_INSTANCE_PARAM = { + "name": "featureInstanceId", + "type": "FeatureInstanceRef[trustee]", + "required": True, + "frontendType": "featureInstance", + "frontendOptions": {"featureCode": "trustee"}, + "description": t("Trustee-Mandant"), +} + TRUSTEE_NODES = [ { "id": "trustee.refreshAccountingData", @@ -10,8 +24,7 @@ TRUSTEE_NODES = [ "label": t("Buchhaltungsdaten aktualisieren"), "description": t("Buchhaltungsdaten aus externem System importieren/aktualisieren."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Trustee Feature-Instanz-ID")}, + dict(_TRUSTEE_INSTANCE_PARAM), {"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox", "description": t("Import erzwingen"), "default": False}, {"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", @@ -39,8 +52,7 @@ TRUSTEE_NODES = [ {"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder", "frontendOptions": {"dependsOn": "connectionReference"}, "description": t("SharePoint-Ordnerpfad"), "default": ""}, - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Trustee Feature-Instanz-ID")}, + dict(_TRUSTEE_INSTANCE_PARAM), {"name": "prompt", "type": "string", "required": False, "frontendType": "textarea", "description": t("AI-Prompt für Extraktion"), "default": ""}, ], @@ -62,12 +74,11 @@ TRUSTEE_NODES = [ "description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."), "parameters": [ # Type matches what producers actually emit: ActionResult.documents - # is `List[ActionDocument]` (see datamodelChat.ActionResult). The + # is List[ActionDocument] (see datamodelChat.ActionResult). The # DataPicker uses this string to filter compatible upstream paths. {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", - "description": t("Dokumentenliste eines Upstream-Producers (z.B. trustee.extractFromFiles → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")}, - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Trustee Feature-Instanz-ID")}, + "description": t("Dokumentenliste — gebunden via DataRef.")}, + dict(_TRUSTEE_INSTANCE_PARAM), ], "inputs": 1, "outputs": 1, @@ -83,13 +94,9 @@ TRUSTEE_NODES = [ "label": t("In Buchhaltung synchronisieren"), "description": t("Trustee-Positionen in Buchhaltungssystem übertragen."), "parameters": [ - # Type matches what producers actually emit: ActionResult.documents - # is `List[ActionDocument]` (see datamodelChat.ActionResult). The - # DataPicker uses this string to filter compatible upstream paths. {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", - "description": t("Verarbeitete Dokumentenliste eines Upstream-Producers (z.B. trustee.processDocuments → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")}, - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Trustee Feature-Instanz-ID")}, + "description": t("Verarbeitete Dokumentenliste — gebunden via DataRef.")}, + dict(_TRUSTEE_INSTANCE_PARAM), ], "inputs": 1, "outputs": 1, @@ -105,8 +112,7 @@ TRUSTEE_NODES = [ "label": t("Treuhand-Daten abfragen"), "description": t("Daten aus der Trustee-DB lesen (Lookup, Aggregation, Roh-Export). Pendant zu refreshAccountingData ohne externen Sync."), "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", - "description": t("Trustee Feature-Instanz-ID")}, + dict(_TRUSTEE_INSTANCE_PARAM), {"name": "mode", "type": "string", "required": True, "frontendType": "select", "frontendOptions": {"options": ["lookup", "raw", "aggregate"]}, "description": t("Abfragemodus"), "default": "lookup"}, diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py index dd302282..632e98fc 100644 --- a/modules/features/graphicalEditor/nodeRegistry.py +++ b/modules/features/graphicalEditor/nodeRegistry.py @@ -9,7 +9,7 @@ import logging from typing import Dict, List, Any, Optional from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES -from modules.features.graphicalEditor.nodeAdapter import _bindsActionFromLegacy +from modules.features.graphicalEditor.nodeAdapter import bindsActionFromLegacy from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText @@ -50,7 +50,7 @@ def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: fields. """ lang = normalizePrimaryLanguageTag(language, "en") - bindsAction = _bindsActionFromLegacy(node) + bindsAction = bindsActionFromLegacy(node) out = dict(node) for key in list(out.keys()): if key.startswith("_"): diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py index b607316a..e8d5b48d 100644 --- a/modules/features/graphicalEditor/portTypes.py +++ b/modules/features/graphicalEditor/portTypes.py @@ -610,7 +610,7 @@ SYSTEM_VARIABLES: Dict[str, Dict[str, str]] = { } -def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any: +def resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any: """Resolve a system variable name to its runtime value.""" from datetime import datetime, timezone @@ -642,7 +642,7 @@ def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any: # Output normalizers # --------------------------------------------------------------------------- -def _normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]: +def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]: """ Normalize raw executor output to match the declared port schema. Ensures _success/_error meta-fields are always present. @@ -696,12 +696,12 @@ def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]: # Transit helpers # --------------------------------------------------------------------------- -def _wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]: +def wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]: """Wrap data in a Transit envelope.""" return {"_transit": True, "_meta": meta, "data": data} -def _unwrapTransit(output: Any) -> Any: +def unwrapTransit(output: Any) -> Any: """Unwrap a Transit envelope, returning the inner data.""" if isinstance(output, dict) and output.get("_transit"): return output.get("data") @@ -726,10 +726,10 @@ def _resolveTransitChain( return out sources = connectionMap.get(current, []) if not sources: - return _unwrapTransit(out) + return unwrapTransit(out) srcId = sources[0][0] if sources else None if not srcId: - return _unwrapTransit(out) + return unwrapTransit(out) current = srcId return nodeOutputs.get(nodeId) @@ -738,7 +738,7 @@ def _resolveTransitChain( # Schema derivation for dynamic outputs # --------------------------------------------------------------------------- -def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]: +def deriveFormPayloadSchemaFromParam(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]: """Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …).""" fields_param = (node.get("parameters") or {}).get(param_key) if not fields_param or not isinstance(fields_param, list): @@ -776,7 +776,7 @@ def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]: """Derive output schema from form field definitions (``parameters.fields``).""" - return _derive_form_payload_schema_from_param(node, "fields") + return deriveFormPayloadSchemaFromParam(node, "fields") def parse_graph_defined_output_schema( @@ -796,9 +796,9 @@ def parse_graph_defined_output_schema( schema_spec = output_port.get("schema") if isinstance(schema_spec, dict) and schema_spec.get("kind") == "fromGraph": param_key = str(schema_spec.get("parameter") or "fields") - return _derive_form_payload_schema_from_param(node, param_key) + return deriveFormPayloadSchemaFromParam(node, param_key) if output_port.get("dynamic") and output_port.get("deriveFrom"): - return _derive_form_payload_schema_from_param(node, str(output_port.get("deriveFrom"))) + return deriveFormPayloadSchemaFromParam(node, str(output_port.get("deriveFrom"))) if isinstance(schema_spec, str) and schema_spec: return PORT_TYPE_CATALOG.get(schema_spec) return None diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py index 4332df50..dc136395 100644 --- a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py @@ -14,7 +14,7 @@ from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPExceptio from fastapi.responses import JSONResponse, StreamingResponse, Response from modules.auth import limiter, getRequestContext, RequestContext from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict -from modules.routes.routeHelpers import _applyFiltersAndSort +from modules.routes.routeHelpers import applyFiltersAndSort from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices from modules.features.graphicalEditor.nodeRegistry import getNodeTypesForApi @@ -230,6 +230,65 @@ def get_user_connection_options( return {"options": options} +@router.get("/{instanceId}/options/feature.instance") +@limiter.limit("60/minute") +def get_feature_instance_options( + request: Request, + instanceId: str = Path(..., description="GraphicalEditor feature instance ID (workflow context)"), + featureCode: str = Query(..., description="Feature code to filter by (e.g. 'trustee', 'redmine', 'clickup')"), + enabledOnly: bool = Query(True, description="If true (default), only enabled feature instances are returned"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Return mandate-scoped FeatureInstances for the given featureCode. + + Used by node parameters with frontendType='featureInstance' (e.g. Trustee + or Redmine nodes that need to bind to a specific tenant FeatureInstance). + Always restricted to the calling user's mandate (derived from the workflow + feature instance) so the picker never leaks foreign-mandate instances. + + Response: { options: [ { value: "", label: " ([code])" } ] } + """ + mandateId = _validateInstanceAccess(instanceId, context) + if not context.user: + raise HTTPException(status_code=401, detail=routeApiMsg("Authentication required")) + code = (featureCode or "").strip().lower() + if not code: + raise HTTPException(status_code=400, detail=routeApiMsg("featureCode query parameter is required")) + if not mandateId: + return {"options": []} + + from modules.interfaces.interfaceDbApp import getRootInterface + rootInterface = getRootInterface() + try: + instances = rootInterface.getFeatureInstancesByMandate( + mandateId, enabledOnly=bool(enabledOnly) + ) or [] + except Exception as e: + logger.error( + "get_feature_instance_options: failed to load instances mandateId=%s: %s", + mandateId, e, exc_info=True, + ) + return {"options": []} + + options: List[Dict[str, str]] = [] + for fi in instances: + fiCode = (getattr(fi, "featureCode", "") or "").strip().lower() + if fiCode != code: + continue + fiId = str(getattr(fi, "id", "") or "") + if not fiId: + continue + rawLabel = getattr(fi, "label", None) or getattr(fi, "name", None) or fiId + options.append({"value": fiId, "label": f"{rawLabel} ({fiCode})"}) + + logger.info( + "graphicalEditor feature.instance options: instanceId=%s mandateId=%s " + "featureCode=%s enabledOnly=%s -> %d options", + instanceId, mandateId, code, enabledOnly, len(options), + ) + return {"options": options} + + @router.post("/{instanceId}/execute") @limiter.limit("30/minute") async def post_execute( @@ -474,6 +533,10 @@ def get_templates( iface = getGraphicalEditorInterface(context.user, mandateId, instanceId) templates = iface.getTemplates(scope=scope) + from modules.routes.routeHelpers import enrichRowsWithFkLabels + from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow + enrichRowsWithFkLabels(templates, AutoWorkflow) + paginationParams = None if pagination: try: @@ -485,7 +548,7 @@ def get_templates( raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") if paginationParams: - filtered = _applyFiltersAndSort(templates, paginationParams) + filtered = applyFiltersAndSort(templates, paginationParams) totalItems = len(filtered) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize @@ -906,15 +969,15 @@ async def _runEditorAgent( enrichedPrompt = prompt if dataSourceIds: - from modules.features.workspace.routeFeatureWorkspace import _buildDataSourceContext + from modules.features.workspace.routeFeatureWorkspace import buildDataSourceContext chatSvc = getService("chat", ctx) - dsInfo = _buildDataSourceContext(chatSvc, dataSourceIds) + dsInfo = buildDataSourceContext(chatSvc, dataSourceIds) if dsInfo: enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}" if featureDataSourceIds: - from modules.features.workspace.routeFeatureWorkspace import _buildFeatureDataSourceContext - fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds) + from modules.features.workspace.routeFeatureWorkspace import buildFeatureDataSourceContext + fdsInfo = buildFeatureDataSourceContext(featureDataSourceIds) if fdsInfo: enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}" @@ -1224,7 +1287,7 @@ def get_workflows( raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") if paginationParams: - filtered = _applyFiltersAndSort(enriched, paginationParams) + filtered = applyFiltersAndSort(enriched, paginationParams) totalItems = len(filtered) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize diff --git a/modules/features/redmine/serviceRedmine.py b/modules/features/redmine/serviceRedmine.py index e244bd84..f0cfbfb4 100644 --- a/modules/features/redmine/serviceRedmine.py +++ b/modules/features/redmine/serviceRedmine.py @@ -48,7 +48,7 @@ from modules.features.redmine.interfaceFeatureRedmine import ( RedmineObjects, getInterface, ) -from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache +from modules.features.redmine.serviceRedmineStatsCache import getStatsCache logger = logging.getLogger(__name__) @@ -334,7 +334,7 @@ def getTicket( def _invalidateCache(featureInstanceId: str) -> None: try: - _getStatsCache().invalidateInstance(featureInstanceId) + getStatsCache().invalidateInstance(featureInstanceId) except Exception as e: logger.warning(f"Failed to invalidate stats cache for {featureInstanceId}: {e}") diff --git a/modules/features/redmine/serviceRedmineStats.py b/modules/features/redmine/serviceRedmineStats.py index 2cfed27c..33a83aa7 100644 --- a/modules/features/redmine/serviceRedmineStats.py +++ b/modules/features/redmine/serviceRedmineStats.py @@ -38,7 +38,7 @@ from modules.features.redmine.datamodelRedmine import ( RedmineThroughputBucket, RedmineTicketDto, ) -from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache +from modules.features.redmine.serviceRedmineStatsCache import getStatsCache logger = logging.getLogger(__name__) @@ -69,7 +69,7 @@ async def getStats( if status_norm not in {"*", "open", "closed"}: status_norm = "*" - cache = _getStatsCache() + cache = getStatsCache() # Cache key now includes the new dimensions so different filter combos # don't collide. ``_freeze`` (in the cache module) hashes lists/sets # for us, so we can pass them directly as extra dimensions. diff --git a/modules/features/redmine/serviceRedmineStatsCache.py b/modules/features/redmine/serviceRedmineStatsCache.py index 46ad9372..12176178 100644 --- a/modules/features/redmine/serviceRedmineStatsCache.py +++ b/modules/features/redmine/serviceRedmineStatsCache.py @@ -123,7 +123,7 @@ class RedmineStatsCache: _globalCache: Optional[RedmineStatsCache] = None -def _getStatsCache() -> RedmineStatsCache: +def getStatsCache() -> RedmineStatsCache: """Process-wide singleton.""" global _globalCache if _globalCache is None: diff --git a/modules/features/redmine/serviceRedmineSync.py b/modules/features/redmine/serviceRedmineSync.py index 2c631630..2fd269d1 100644 --- a/modules/features/redmine/serviceRedmineSync.py +++ b/modules/features/redmine/serviceRedmineSync.py @@ -38,7 +38,7 @@ from modules.features.redmine.datamodelRedmine import ( RedmineTicketMirror, ) from modules.features.redmine.interfaceFeatureRedmine import getInterface -from modules.features.redmine.serviceRedmineStatsCache import _getStatsCache +from modules.features.redmine.serviceRedmineStatsCache import getStatsCache logger = logging.getLogger(__name__) @@ -134,7 +134,7 @@ async def runSync( durationMs=duration_ms, lastSyncAt=now_epoch, ) - _getStatsCache().invalidateInstance(featureInstanceId) + getStatsCache().invalidateInstance(featureInstanceId) return RedmineSyncResultDto( instanceId=featureInstanceId, @@ -188,7 +188,7 @@ async def upsertSingleTicket( now_epoch = time.time() _upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch) relations_upserted = _replaceRelations(iface, featureInstanceId, issue, now_epoch) - _getStatsCache().invalidateInstance(featureInstanceId) + getStatsCache().invalidateInstance(featureInstanceId) return relations_upserted @@ -202,7 +202,7 @@ def deleteMirroredTicket( iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId) deleted = iface.deleteMirroredTicket(featureInstanceId, int(issueId)) iface.deleteMirroredRelationsForIssue(featureInstanceId, int(issueId)) - _getStatsCache().invalidateInstance(featureInstanceId) + getStatsCache().invalidateInstance(featureInstanceId) return deleted diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py index 37cb2d77..3368f9fc 100644 --- a/modules/features/teamsbot/routeFeatureTeamsbot.py +++ b/modules/features/teamsbot/routeFeatureTeamsbot.py @@ -383,7 +383,7 @@ async def streamSession( async def _eventGenerator(): """Generate SSE events from the session event queue.""" - from .service import _sessionEvents + from .service import sessionEvents # Send initial session state yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n" @@ -394,10 +394,10 @@ async def streamSession( yield f"data: {json.dumps({'type': 'botConnectionState', 'data': {'connected': _getActiveService(sessionId) is not None}})}\n\n" # Stream events - eventQueue = _sessionEvents.get(sessionId) + eventQueue = sessionEvents.get(sessionId) if not eventQueue: - _sessionEvents[sessionId] = asyncio.Queue() - eventQueue = _sessionEvents[sessionId] + sessionEvents[sessionId] = asyncio.Queue() + eventQueue = sessionEvents[sessionId] try: while True: @@ -810,8 +810,8 @@ async def deleteUserAccount( # MFA Code Submission (relayed to active bot session) # ========================================================================= -_mfaCodeQueues: dict = {} -_mfaWaitTasks: dict = {} +mfaCodeQueues: dict = {} +mfaWaitTasks: dict = {} @router.post("/{instanceId}/sessions/{sessionId}/mfa") @limiter.limit("10/minute") @@ -834,7 +834,7 @@ async def submitMfaCode( logger.info(f"MFA submission for session {sessionId}: action={mfaAction}, codeLen={len(mfaCode)}") - queue = _mfaCodeQueues.get(sessionId) + queue = mfaCodeQueues.get(sessionId) if queue: await queue.put({"action": mfaAction, "code": mfaCode}) return {"submitted": True} @@ -981,7 +981,7 @@ async def testVoice( ): """Test TTS voice with AI-generated sample text in the correct language.""" from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - from .service import _createAiService + from .service import createAiService from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum mandateId = _validateInstanceAccess(instanceId, context) @@ -992,7 +992,7 @@ async def testVoice( botName = body.get("botName", "AI Assistant") try: - aiService = _createAiService(context.user, mandateId, instanceId) + aiService = createAiService(context.user, mandateId, instanceId) await aiService.ensureAiObjectsInitialized() aiRequest = AiCallRequest( diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py index 2067a7f2..6d9df074 100644 --- a/modules/features/teamsbot/service.py +++ b/modules/features/teamsbot/service.py @@ -532,7 +532,7 @@ def getActiveService(sessionId: str) -> Optional["TeamsbotService"]: # AI Service Factory (for billing-aware AI calls) # ========================================================================= -def _createAiService(user, mandateId, featureInstanceId=None): +def createAiService(user, mandateId, featureInstanceId=None): """Create a properly wired AiService via the service center.""" ctx = ServiceCenterContext( user=user, @@ -546,15 +546,15 @@ def _createAiService(user, mandateId, featureInstanceId=None): # ========================================================================= # Session Event Queues (for SSE streaming to frontend) # ========================================================================= -_sessionEvents: Dict[str, asyncio.Queue] = {} +sessionEvents: Dict[str, asyncio.Queue] = {} async def _emitSessionEvent(sessionId: str, eventType: str, data: Any): """Emit an event to the session's SSE stream. Creates the queue on-demand so events are never silently dropped.""" - if sessionId not in _sessionEvents: - _sessionEvents[sessionId] = asyncio.Queue() - await _sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()}) + if sessionId not in sessionEvents: + sessionEvents[sessionId] = asyncio.Queue() + await sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()}) def _normalizeGatewayHostForBotWs(host: str) -> str: @@ -709,7 +709,7 @@ class TeamsbotService: interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) # Initialize SSE event queue - _sessionEvents[sessionId] = asyncio.Queue() + sessionEvents[sessionId] = asyncio.Queue() try: # Update status to JOINING @@ -798,7 +798,7 @@ class TeamsbotService: }) # Cleanup event queue - _sessionEvents.pop(sessionId, None) + sessionEvents.pop(sessionId, None) # ========================================================================= # Browser Bot WebSocket Communication @@ -1048,9 +1048,9 @@ class TeamsbotService: "timestamp": getIsoTimestamp(), }) - from .routeFeatureTeamsbot import _mfaCodeQueues, _mfaWaitTasks + from .routeFeatureTeamsbot import mfaCodeQueues, mfaWaitTasks mfaQueue = asyncio.Queue() - _mfaCodeQueues[sessionId] = mfaQueue + mfaCodeQueues[sessionId] = mfaQueue async def _waitAndForwardMfa(sid, queue, ws): try: @@ -1075,10 +1075,10 @@ class TeamsbotService: except asyncio.CancelledError: logger.info(f"[WS] MFA wait cancelled for session {sid} (resolved via page)") finally: - _mfaCodeQueues.pop(sid, None) - _mfaWaitTasks.pop(sid, None) + mfaCodeQueues.pop(sid, None) + mfaWaitTasks.pop(sid, None) - _mfaWaitTasks[sessionId] = asyncio.create_task( + mfaWaitTasks[sessionId] = asyncio.create_task( _waitAndForwardMfa(sessionId, mfaQueue, websocket) ) @@ -1100,11 +1100,11 @@ class TeamsbotService: elif msgType == "mfaResolved": success = message.get("success", False) logger.info(f"[WS] MFA resolved: success={success}") - from .routeFeatureTeamsbot import _mfaCodeQueues, _mfaWaitTasks - task = _mfaWaitTasks.pop(sessionId, None) + from .routeFeatureTeamsbot import mfaCodeQueues, mfaWaitTasks + task = mfaWaitTasks.pop(sessionId, None) if task and not task.done(): task.cancel() - _mfaCodeQueues.pop(sessionId, None) + mfaCodeQueues.pop(sessionId, None) await _emitSessionEvent(sessionId, "mfaResolved", { "success": success, "timestamp": getIsoTimestamp(), @@ -1844,7 +1844,7 @@ class TeamsbotService: ) try: - aiService = _createAiService( + aiService = createAiService( self.currentUser, self.mandateId, self.instanceId ) await aiService.ensureAiObjectsInitialized() @@ -1976,7 +1976,7 @@ class TeamsbotService: ) try: - aiService = _createAiService( + aiService = createAiService( self.currentUser, self.mandateId, self.instanceId ) await aiService.ensureAiObjectsInitialized() @@ -2195,7 +2195,7 @@ class TeamsbotService: # Call SPEECH_TEAMS try: - aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) + aiService = createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -3767,7 +3767,7 @@ class TeamsbotService: ) try: - aiService = _createAiService( + aiService = createAiService( self.currentUser, self.mandateId, self.instanceId ) await aiService.ensureAiObjectsInitialized() @@ -3930,7 +3930,7 @@ class TeamsbotService: """Summarize a long user-provided session context to its essential points. This reduces token usage in every subsequent AI call.""" try: - aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) + aiService = createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -3980,7 +3980,7 @@ class TeamsbotService: lines.append(f"[{speaker}]: {text}") textToSummarize = "\n".join(lines) - aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) + aiService = createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -4021,7 +4021,7 @@ class TeamsbotService: for t in transcripts ) - aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) + aiService = createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( diff --git a/modules/features/trustee/accounting/accountingBridge.py b/modules/features/trustee/accounting/accountingBridge.py index b91cd83e..2a267b73 100644 --- a/modules/features/trustee/accounting/accountingBridge.py +++ b/modules/features/trustee/accounting/accountingBridge.py @@ -16,7 +16,7 @@ from .accountingConnectorBase import ( AccountingChart, SyncResult, ) -from .accountingRegistry import _getAccountingRegistry +from .accountingRegistry import getAccountingRegistry logger = logging.getLogger(__name__) @@ -26,7 +26,7 @@ class AccountingBridge: def __init__(self, trusteeInterface): self._trusteeInterface = trusteeInterface - self._registry = _getAccountingRegistry() + self._registry = getAccountingRegistry() async def getActiveConfig(self, featureInstanceId: str) -> Optional[Dict[str, Any]]: """Load the active TrusteeAccountingConfig for a feature instance.""" diff --git a/modules/features/trustee/accounting/accountingConnectorBase.py b/modules/features/trustee/accounting/accountingConnectorBase.py index c5124184..5d76c997 100644 --- a/modules/features/trustee/accounting/accountingConnectorBase.py +++ b/modules/features/trustee/accounting/accountingConnectorBase.py @@ -39,6 +39,26 @@ class AccountingChart(BaseModel): accountType: Optional[str] = None +class AccountingPeriodBalance(BaseModel): + """Balance snapshot for one account in one period. + + Mirrors the `TrusteeDataAccountBalance` table 1:1 so + `accountingDataSync._persistBalances` can persist connector output without + re-mapping. `closingBalance` is always the *cumulative* balance at the end + of the period (NOT the period's net movement). `periodMonth=0` denotes the + annual bucket (closing balance per fiscal year-end). + """ + accountNumber: str + periodYear: int + periodMonth: int = 0 + openingBalance: float = 0.0 + debitTotal: float = 0.0 + creditTotal: float = 0.0 + closingBalance: float = 0.0 + currency: str = "CHF" + asOfDate: Optional[str] = None + + class SyncResult(BaseModel): """Result of a sync operation.""" success: bool @@ -126,6 +146,31 @@ class BaseAccountingConnector(ABC): accountNumbers: pre-fetched account numbers (avoids redundant API call). Override in connectors that support it.""" return [] + async def getAccountBalances( + self, + config: Dict[str, Any], + years: List[int], + accountNumbers: Optional[List[str]] = None, + ) -> List[AccountingPeriodBalance]: + """Read closing balances per account and period from the external system. + + Contract: + - One row per (accountNumber, periodYear, periodMonth). + - `periodMonth=0` => annual bucket (closing balance per fiscal year-end). + - `periodMonth=1..12` => closing balance per end of that calendar month. + - `closingBalance` MUST be the *cumulative* balance at period end, + including all prior-year carry-over and yearend bookings -- NOT the + period's net movement. + - `openingBalance` MUST be the cumulative balance at period start + (= previous period's closingBalance). + + Default returns []; `AccountingDataSync` will then fall back to a + local cumulative aggregation from journal lines. Override in + connectors that can fetch authoritative balances from the source + system (e.g. RMA `/gl/saldo`). + """ + return [] + async def uploadDocument( self, config: Dict[str, Any], diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py index ef8789ea..0770ead5 100644 --- a/modules/features/trustee/accounting/accountingDataSync.py +++ b/modules/features/trustee/accounting/accountingDataSync.py @@ -25,7 +25,7 @@ from pathlib import Path from typing import Callable, Dict, Any, List, Optional, Type from .accountingConnectorBase import BaseAccountingConnector -from .accountingRegistry import _getAccountingRegistry +from .accountingRegistry import getAccountingRegistry logger = logging.getLogger(__name__) @@ -33,6 +33,72 @@ logger = logging.getLogger(__name__) _HEARTBEAT_EVERY = 500 +def _isIncomeStatementAccount(accountNumber: str) -> bool: + """Swiss KMU-Kontenrahmen heuristic: 1xxx + 2xxx -> balance sheet + (cumulative carry-over across years); 3xxx..9xxx -> income statement + (reset to 0 at fiscal-year start). Used by the local fallback only; + when a connector returns balances, those values are used verbatim. + """ + a = (accountNumber or "").strip() + if not a or not a[0].isdigit(): + return False + return a[0] not in ("1", "2") + + +def _resolveBalanceYears( + dateFrom: Optional[str], + dateTo: Optional[str], + oldestBookingDate: Optional[str], + newestBookingDate: Optional[str], +) -> List[int]: + """Derive the list of years for which the connector should compute balances. + + Prefers the ``dateFrom``/``dateTo`` import window the user requested. Falls + back to the actual oldest/newest booking date observed in the imported + journal (so e.g. a `dateTo=None` import still produces balances for every + year that has data). If nothing is known, returns the current year as a + sensible default. + """ + def _yearOf(s: Optional[str]) -> Optional[int]: + if not s: + return None + try: + return int(str(s)[:4]) + except (TypeError, ValueError): + return None + + fromYear = _yearOf(dateFrom) or _yearOf(oldestBookingDate) + toYear = _yearOf(dateTo) or _yearOf(newestBookingDate) + if fromYear is None and toYear is None: + return [time.gmtime().tm_year] + if fromYear is None: + fromYear = toYear + if toYear is None: + toYear = fromYear + if toYear < fromYear: + fromYear, toYear = toYear, fromYear + return list(range(fromYear, toYear + 1)) + + +def _balanceModelToRow(b: Any, scope: Dict[str, Any]) -> Dict[str, Any]: + """Map an ``AccountingPeriodBalance`` (or compatible dict) to a DB row.""" + if isinstance(b, dict): + get = b.get + else: + get = lambda k, default=None: getattr(b, k, default) + return { + "accountNumber": str(get("accountNumber", "") or ""), + "periodYear": int(get("periodYear", 0) or 0), + "periodMonth": int(get("periodMonth", 0) or 0), + "openingBalance": round(float(get("openingBalance", 0) or 0), 2), + "debitTotal": round(float(get("debitTotal", 0) or 0), 2), + "creditTotal": round(float(get("creditTotal", 0) or 0), 2), + "closingBalance": round(float(get("closingBalance", 0) or 0), 2), + "currency": str(get("currency", "CHF") or "CHF"), + **scope, + } + + def _isDebugDumpEnabled() -> bool: """Whether to write raw connector payloads to disk for offline inspection. @@ -101,7 +167,7 @@ class AccountingDataSync: def __init__(self, trusteeInterface): self._if = trusteeInterface - self._registry = _getAccountingRegistry() + self._registry = getAccountingRegistry() async def importData( self, @@ -246,18 +312,39 @@ class AccountingDataSync: logger.error(f"Import contacts failed: {e}", exc_info=True) summary["errors"].append(f"Contacts: {e}") - # ---- Phase 4: Compute account balances ---- - # Progress budget: 90-95 %. Pure DB aggregation, no external calls. + # ---- Phase 4: Account balances ---- + # Progress budget: 88-95 %. Connector first (RMA -> /gl/saldo, Bexio + # & Abacus -> aggregated journal). On empty/failed connector output + # we fall back to a *correct* cumulative aggregation from the + # journal lines we just persisted. + connectorBalances: list = [] + balanceSource = "local-fallback" try: - _progress(90, "Berechne Kontensaldi...") + _progress(88, "Lade Kontensaldi vom Buchhaltungssystem...") + balanceYears = _resolveBalanceYears(dateFrom, dateTo, summary.get("oldestBookingDate"), summary.get("newestBookingDate")) + connectorBalances = await connector.getAccountBalances( + connConfig, + years=balanceYears, + accountNumbers=fetchedAccountNumbers or None, + ) + _dumpSyncData("accountBalances", connectorBalances) + if connectorBalances: + balanceSource = "connector" + except Exception as e: + logger.warning(f"Connector getAccountBalances failed, will use local fallback: {e}", exc_info=True) + summary["errors"].append(f"Balances connector: {e}") + + try: + _progress(92, "Speichere Kontensaldi...") balanceCount = await asyncio.to_thread( self._persistBalances, featureInstanceId, mandateId, TrusteeDataJournalEntry, TrusteeDataJournalLine, TrusteeDataAccountBalance, + connectorBalances, balanceSource, ) summary["accountBalances"] = balanceCount - _progress(95, f"{balanceCount} Saldi berechnet.") + _progress(95, f"{balanceCount} Saldi gespeichert (source={balanceSource}).") except Exception as e: - logger.error(f"Compute balances failed: {e}", exc_info=True) + logger.error(f"Persist balances failed: {e}", exc_info=True) summary["errors"].append(f"Balances: {e}") cfgId = cfgRecord.get("id") @@ -401,12 +488,66 @@ class AccountingDataSync: logger.info(f"Persisted {n} contacts for {featureInstanceId} in {time.time() - t0:.1f}s") return n - def _persistBalances(self, featureInstanceId: str, mandateId: str, - modelEntry: Type, modelLine: Type, modelBalance: Type) -> int: - """Re-aggregate journal lines into monthly + annual balances.""" + def _persistBalances( + self, + featureInstanceId: str, + mandateId: str, + modelEntry: Type, + modelLine: Type, + modelBalance: Type, + connectorBalances: list, + source: str, + ) -> int: + """Persist account balances per (account, period) into ``TrusteeDataAccountBalance``. + + Source of truth (``source="connector"``): the list returned by + ``BaseAccountingConnector.getAccountBalances`` is persisted 1:1. + + Fallback (``source="local-fallback"``): aggregate the just-persisted + journal lines into **cumulative** balances. Unlike the previous + implementation, this version (a) carries the cumulative balance + forward across months/years for balance-sheet accounts, (b) resets + income-statement accounts at fiscal-year start, and (c) computes + ``openingBalance`` correctly as the previous period's + ``closingBalance``. ``openingBalance`` of the very first imported + period stays at 0 (no prior data available -- by design; see plan + document for rationale). + """ t0 = time.time() self._bulkClear(modelBalance, featureInstanceId) + scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId} + if connectorBalances: + rows = [_balanceModelToRow(b, scope) for b in connectorBalances] + n = self._bulkCreate(modelBalance, rows) + logger.info( + f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s " + f"(source={source})" + ) + return n + + rows = self._buildLocalBalanceFallback(featureInstanceId, modelEntry, modelLine, scope) + n = self._bulkCreate(modelBalance, rows) + logger.info( + f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s " + f"(source={source})" + ) + return n + + def _buildLocalBalanceFallback( + self, + featureInstanceId: str, + modelEntry: Type, + modelLine: Type, + scope: Dict[str, Any], + ) -> List[Dict[str, Any]]: + """Aggregate ``TrusteeDataJournalLine`` rows into cumulative period balances. + + Returns rows ready for ``_bulkCreate``. Walks every account + chronologically through all years observed in the journal so the + cumulative balance and per-period opening are exact (within the + bounds of the imported window). + """ entries = self._if.db.getRecordset( modelEntry, recordFilter={"featureInstanceId": featureInstanceId}, ) or [] @@ -421,7 +562,9 @@ class AccountingDataSync: modelLine, recordFilter={"featureInstanceId": featureInstanceId}, ) or [] - buckets: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0}) + movements: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0}) + observedYears: set = set() + observedAccounts: set = set() for ln in lines: if isinstance(ln, dict): jeid = ln.get("journalEntryId", "") @@ -437,7 +580,7 @@ class AccountingDataSync: bdate = entryDates.get(jeid, "") if not accNo or not bdate: continue - parts = bdate.split("-") + parts = str(bdate).split("-") if len(parts) < 2: continue try: @@ -445,29 +588,56 @@ class AccountingDataSync: month = int(parts[1]) except ValueError: continue + movements[(accNo, year, month)]["debit"] += debit + movements[(accNo, year, month)]["credit"] += credit + observedYears.add(year) + observedAccounts.add(accNo) - buckets[(accNo, year, month)]["debit"] += debit - buckets[(accNo, year, month)]["credit"] += credit - buckets[(accNo, year, 0)]["debit"] += debit - buckets[(accNo, year, 0)]["credit"] += credit + if not observedYears or not observedAccounts: + return [] - scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId} - rows = [{ - "accountNumber": accNo, - "periodYear": year, - "periodMonth": month, - "openingBalance": 0.0, - "debitTotal": round(totals["debit"], 2), - "creditTotal": round(totals["credit"], 2), - "closingBalance": round(totals["debit"] - totals["credit"], 2), - "currency": "CHF", - **scope, - } for (accNo, year, month), totals in buckets.items()] - n = self._bulkCreate(modelBalance, rows) - logger.info( - f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s" - ) - return n + sortedYears = sorted(observedYears) + rows: List[Dict[str, Any]] = [] + for accNo in sorted(observedAccounts): + isER = _isIncomeStatementAccount(accNo) + cumulativeOpeningOfYear = 0.0 + for year in sortedYears: + yearOpening = 0.0 if isER else cumulativeOpeningOfYear + running = yearOpening + yearDebit = 0.0 + yearCredit = 0.0 + for month in range(1, 13): + opening = running + mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0}) + running = opening + mov["debit"] - mov["credit"] + yearDebit += mov["debit"] + yearCredit += mov["credit"] + if mov["debit"] == 0 and mov["credit"] == 0 and opening == 0 and running == 0: + continue + rows.append({ + "accountNumber": accNo, + "periodYear": year, + "periodMonth": month, + "openingBalance": round(opening, 2), + "debitTotal": round(mov["debit"], 2), + "creditTotal": round(mov["credit"], 2), + "closingBalance": round(running, 2), + "currency": "CHF", + **scope, + }) + rows.append({ + "accountNumber": accNo, + "periodYear": year, + "periodMonth": 0, + "openingBalance": round(yearOpening, 2), + "debitTotal": round(yearDebit, 2), + "creditTotal": round(yearCredit, 2), + "closingBalance": round(running, 2), + "currency": "CHF", + **scope, + }) + cumulativeOpeningOfYear = running + return rows # ===== Low-level bulk helpers ===== diff --git a/modules/features/trustee/accounting/accountingRegistry.py b/modules/features/trustee/accounting/accountingRegistry.py index ca5e27d9..fe1b20d5 100644 --- a/modules/features/trustee/accounting/accountingRegistry.py +++ b/modules/features/trustee/accounting/accountingRegistry.py @@ -74,7 +74,7 @@ class AccountingRegistry: _registryInstance: Optional[AccountingRegistry] = None -def _getAccountingRegistry() -> AccountingRegistry: +def getAccountingRegistry() -> AccountingRegistry: """Singleton access to the accounting registry.""" global _registryInstance if _registryInstance is None: diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py index 0269a654..e03e7df7 100644 --- a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py +++ b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py @@ -6,12 +6,22 @@ API docs: https://downloads.abacus.ch/fileadmin/ablage/abaconnect/htmlfiles/docs Auth: OAuth 2.0 Client Credentials (Service User). Each Abacus instance has its own host URL; there is no central cloud endpoint. Entity API uses OData V4 format. + +Account balances: + Abacus exposes an ``AccountBalances`` entity (per fiscal year), but its + availability depends on the customer's Abacus license / Profile and is + NOT guaranteed for all instances. The robust default is therefore to + aggregate balances locally from ``GeneralJournalEntries`` (always + present). If a future iteration confirms the entity for a specific + instance, ``getAccountBalances`` can be extended to prefer that source + via a config flag (e.g. ``useAccountBalancesEntity: true``). """ import base64 +import calendar import logging import time -from typing import List, Dict, Any, Optional +from typing import List, Dict, Any, Optional, Tuple import aiohttp @@ -19,6 +29,7 @@ from ..accountingConnectorBase import ( BaseAccountingConnector, AccountingBooking, AccountingChart, + AccountingPeriodBalance, ConnectorConfigField, SyncResult, ) @@ -27,6 +38,21 @@ from modules.shared.i18nRegistry import t logger = logging.getLogger(__name__) +def _formatLastDayOfMonth(year: int, month: int) -> str: + lastDay = calendar.monthrange(year, month)[1] + return f"{year:04d}-{month:02d}-{lastDay:02d}" + + +def _isIncomeStatementAccount(accountNumber: str) -> bool: + """Swiss KMU-Kontenrahmen heuristic: 1xxx + 2xxx -> balance sheet (cumulative); + 3xxx..9xxx -> income statement (reset per fiscal year). + """ + a = (accountNumber or "").strip() + if not a or not a[0].isdigit(): + return False + return a[0] not in ("1", "2") + + class AccountingConnectorAbacus(BaseAccountingConnector): def __init__(self): @@ -341,3 +367,158 @@ class AccountingConnectorAbacus(BaseAccountingConnector): except Exception as e: logger.error(f"Abacus getVendors error: {e}") return [] + + async def getAccountBalances( + self, + config: Dict[str, Any], + years: List[int], + accountNumbers: Optional[List[str]] = None, + ) -> List[AccountingPeriodBalance]: + """Aggregate account balances from ``GeneralJournalEntries`` (OData V4). + + Strategy: + 1. Page through ``GET GeneralJournalEntries?$filter=JournalDate le YYYY-12-31`` + until ``@odata.nextLink`` is exhausted. Including ALL prior years + is required to compute the carry-over for balance-sheet accounts. + 2. Per (account, year, month) accumulate ``DebitAmount``/``CreditAmount`` + from ``Lines``. + 3. Income-statement accounts (3xxx-9xxx) reset to 0 per fiscal year; + balance-sheet accounts (1xxx-2xxx) carry their cumulative balance. + + Optional optimization (not yet active): if the customer's Abacus + instance ships the ``AccountBalances`` OData entity, it can return + authoritative period balances directly. Detect via a probe GET on + ``AccountBalances?$top=1`` and prefer that source. This is intentionally + deferred until we hit a customer where the entity is available -- + the local aggregation is always-correct fallback. + """ + if not years: + return [] + sortedYears = sorted({int(y) for y in years if y}) + minYear = sortedYears[0] + maxYear = sortedYears[-1] + accountNumbersSet = set(accountNumbers) if accountNumbers else None + + headers = await self._buildAuthHeaders(config) + if not headers: + logger.warning("Abacus getAccountBalances: no access token, skipping") + return [] + + rawEntries = await self._fetchAllJournalEntries(config, headers, dateTo=f"{maxYear}-12-31") + + movements: Dict[Tuple[str, int, int], Dict[str, float]] = {} + seenAccounts: set = set() + for entry in rawEntries: + dateRaw = str(entry.get("JournalDate") or "")[:10] + if len(dateRaw) < 7: + continue + try: + year = int(dateRaw[:4]) + month = int(dateRaw[5:7]) + except ValueError: + continue + for line in (entry.get("Lines") or []): + accNo = str(line.get("AccountId") or "").strip() + if not accNo: + continue + seenAccounts.add(accNo) + try: + debit = float(line.get("DebitAmount") or 0) + credit = float(line.get("CreditAmount") or 0) + except (TypeError, ValueError): + continue + if debit == 0 and credit == 0: + continue + bucket = movements.setdefault((accNo, year, month), {"debit": 0.0, "credit": 0.0}) + bucket["debit"] += debit + bucket["credit"] += credit + + results: List[AccountingPeriodBalance] = [] + for accNo in sorted(seenAccounts): + if accountNumbersSet is not None and accNo not in accountNumbersSet: + continue + isER = _isIncomeStatementAccount(accNo) + + preMinYearBalance = 0.0 + if not isER: + for (a, yr, _mo), m in movements.items(): + if a == accNo and yr < minYear: + preMinYearBalance += m["debit"] - m["credit"] + + cumulativeOpeningOfYear = preMinYearBalance + for year in sortedYears: + yearOpening = 0.0 if isER else cumulativeOpeningOfYear + running = yearOpening + yearDebit = 0.0 + yearCredit = 0.0 + for month in range(1, 13): + opening = running + mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0}) + running = opening + mov["debit"] - mov["credit"] + yearDebit += mov["debit"] + yearCredit += mov["credit"] + results.append(AccountingPeriodBalance( + accountNumber=accNo, + periodYear=year, + periodMonth=month, + openingBalance=round(opening, 2), + debitTotal=round(mov["debit"], 2), + creditTotal=round(mov["credit"], 2), + closingBalance=round(running, 2), + currency="CHF", + asOfDate=_formatLastDayOfMonth(year, month), + )) + + results.append(AccountingPeriodBalance( + accountNumber=accNo, + periodYear=year, + periodMonth=0, + openingBalance=round(yearOpening, 2), + debitTotal=round(yearDebit, 2), + creditTotal=round(yearCredit, 2), + closingBalance=round(running, 2), + currency="CHF", + asOfDate=f"{year}-12-31", + )) + + cumulativeOpeningOfYear = running + + logger.info( + "Abacus getAccountBalances: %s rows from %s journal entries (years=%s)", + len(results), len(rawEntries), sortedYears, + ) + return results + + async def _fetchAllJournalEntries( + self, + config: Dict[str, Any], + headers: Dict[str, str], + dateTo: str, + ) -> List[Dict[str, Any]]: + """Page through ``GeneralJournalEntries`` (OData V4) following ``@odata.nextLink``. + + We filter ``JournalDate le dateTo`` to bound the result, but include + ALL prior years (no lower bound) so cumulative balance-sheet + carry-over is correct. + """ + results: List[Dict[str, Any]] = [] + baseUrl = self._buildEntityUrl(config, f"GeneralJournalEntries?$filter=JournalDate le {dateTo}") + nextUrl: Optional[str] = baseUrl + async with aiohttp.ClientSession() as session: + while nextUrl: + try: + async with session.get(nextUrl, headers=headers, timeout=aiohttp.ClientTimeout(total=60)) as resp: + if resp.status != 200: + body = await resp.text() + logger.warning("Abacus GeneralJournalEntries HTTP %s: %s", resp.status, body[:200]) + break + data = await resp.json() + except Exception as ex: + logger.warning("Abacus GeneralJournalEntries request failed: %s", ex) + break + page = data.get("value") or [] + if not isinstance(page, list): + break + results.extend(page) + nextUrl = data.get("@odata.nextLink") + return results diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py index dcb3233d..28c2a334 100644 --- a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py +++ b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py @@ -7,10 +7,20 @@ Auth: Personal Access Token (PAT) as Bearer token. Base URL: https://api.bexio.com/ Note: Bexio uses internal account IDs (int), not account numbers. The connector caches the chart of accounts to resolve accountNumber -> account_id. + +Account balances: + Bexio does NOT expose a dedicated saldo endpoint (no equivalent to RMA's + ``/gl/saldo``). ``getAccountBalances`` therefore aggregates balances + locally by paginating ``GET /3.0/accounting/journal`` (max 2000 rows per + page) and computing cumulative balances per (account, period). Income- + statement accounts (3xxx-9xxx in the Swiss KMU-Kontenrahmen) are reset + at the start of each fiscal year; balance-sheet accounts (1xxx-2xxx) + carry their cumulative balance across years. """ +import calendar import logging -from typing import List, Dict, Any, Optional +from typing import List, Dict, Any, Optional, Tuple import aiohttp @@ -18,6 +28,7 @@ from ..accountingConnectorBase import ( BaseAccountingConnector, AccountingBooking, AccountingChart, + AccountingPeriodBalance, ConnectorConfigField, SyncResult, ) @@ -26,6 +37,23 @@ from modules.shared.i18nRegistry import t logger = logging.getLogger(__name__) _DEFAULT_API_BASE_URL = "https://api.bexio.com/" +_JOURNAL_PAGE_SIZE = 2000 + + +def _formatLastDayOfMonth(year: int, month: int) -> str: + lastDay = calendar.monthrange(year, month)[1] + return f"{year:04d}-{month:02d}-{lastDay:02d}" + + +def _isIncomeStatementAccount(accountNumber: str) -> bool: + """Swiss KMU-Kontenrahmen: 1xxx Aktiven + 2xxx Passiven -> balance sheet + (cumulative balance carried across years); 3xxx..9xxx -> income statement + (reset to 0 at fiscal-year start). + """ + a = (accountNumber or "").strip() + if not a or not a[0].isdigit(): + return False + return a[0] not in ("1", "2") class AccountingConnectorBexio(BaseAccountingConnector): @@ -260,3 +288,148 @@ class AccountingConnectorBexio(BaseAccountingConnector): except Exception as e: logger.error(f"Bexio getCustomers error: {e}") return [] + + async def getAccountBalances( + self, + config: Dict[str, Any], + years: List[int], + accountNumbers: Optional[List[str]] = None, + ) -> List[AccountingPeriodBalance]: + """Aggregate account balances locally from ``/3.0/accounting/journal``. + + Bexio offers no per-account saldo endpoint, so we paginate the full + journal up to the latest requested fiscal year-end and compute + opening / debit / credit / closing per (account, period). For balance- + sheet accounts the cumulative carry-over from prior years is included; + for income-statement accounts the balance is reset at the start of + every requested fiscal year (per Swiss accounting principles). + """ + if not years: + return [] + sortedYears = sorted({int(y) for y in years if y}) + minYear = sortedYears[0] + maxYear = sortedYears[-1] + accountNumbersSet = set(accountNumbers) if accountNumbers else None + + accounts = await self._loadRawAccounts(config) + accIdToNumber: Dict[int, str] = {acc.get("id"): str(acc.get("account_no", "")) for acc in accounts if acc.get("id") is not None and acc.get("account_no") is not None} + if not accIdToNumber: + logger.warning("Bexio getAccountBalances: chart of accounts is empty -- cannot derive balances") + return [] + + rawEntries = await self._fetchAllJournalRows(config, dateTo=f"{maxYear}-12-31") + + movements: Dict[Tuple[str, int, int], Dict[str, float]] = {} + for e in rawEntries: + dateRaw = str(e.get("date") or "")[:10] + if len(dateRaw) < 7: + continue + try: + year = int(dateRaw[:4]) + month = int(dateRaw[5:7]) + except ValueError: + continue + try: + amount = float(e.get("amount") or 0) + except (TypeError, ValueError): + continue + if amount == 0: + continue + debitAcc = accIdToNumber.get(e.get("debit_account_id")) + creditAcc = accIdToNumber.get(e.get("credit_account_id")) + if debitAcc: + bucket = movements.setdefault((debitAcc, year, month), {"debit": 0.0, "credit": 0.0}) + bucket["debit"] += amount + if creditAcc: + bucket = movements.setdefault((creditAcc, year, month), {"debit": 0.0, "credit": 0.0}) + bucket["credit"] += amount + + accountsByNumber = sorted({n for n in accIdToNumber.values() if n}) + results: List[AccountingPeriodBalance] = [] + + for accNo in accountsByNumber: + if accountNumbersSet is not None and accNo not in accountNumbersSet: + continue + isER = _isIncomeStatementAccount(accNo) + + preMinYearBalance = 0.0 + if not isER: + for (a, yr, _mo), m in movements.items(): + if a == accNo and yr < minYear: + preMinYearBalance += m["debit"] - m["credit"] + + cumulativeOpeningOfYear = preMinYearBalance + for year in sortedYears: + if isER: + yearOpening = 0.0 + else: + yearOpening = cumulativeOpeningOfYear + + running = yearOpening + yearDebit = 0.0 + yearCredit = 0.0 + for month in range(1, 13): + opening = running + mov = movements.get((accNo, year, month), {"debit": 0.0, "credit": 0.0}) + running = opening + mov["debit"] - mov["credit"] + yearDebit += mov["debit"] + yearCredit += mov["credit"] + results.append(AccountingPeriodBalance( + accountNumber=accNo, + periodYear=year, + periodMonth=month, + openingBalance=round(opening, 2), + debitTotal=round(mov["debit"], 2), + creditTotal=round(mov["credit"], 2), + closingBalance=round(running, 2), + currency="CHF", + asOfDate=_formatLastDayOfMonth(year, month), + )) + + results.append(AccountingPeriodBalance( + accountNumber=accNo, + periodYear=year, + periodMonth=0, + openingBalance=round(yearOpening, 2), + debitTotal=round(yearDebit, 2), + creditTotal=round(yearCredit, 2), + closingBalance=round(running, 2), + currency="CHF", + asOfDate=f"{year}-12-31", + )) + + cumulativeOpeningOfYear = running + + logger.info("Bexio getAccountBalances: %s rows from %s journal entries (years=%s)", len(results), len(rawEntries), sortedYears) + return results + + async def _fetchAllJournalRows(self, config: Dict[str, Any], dateTo: str) -> List[Dict[str, Any]]: + """Paginate ``GET /3.0/accounting/journal?to=YYYY-12-31`` and return all rows. + + Bexio caps page size at 2000; we fetch until a short page is returned. + Failures abort early (returning whatever rows were collected) -- the + caller logs the row count, so partial data is visible. + """ + rows: List[Dict[str, Any]] = [] + offset = 0 + url = self._buildUrl(config, "3.0/accounting/journal") + async with aiohttp.ClientSession() as session: + while True: + params = {"to": dateTo, "limit": str(_JOURNAL_PAGE_SIZE), "offset": str(offset)} + try: + async with session.get(url, headers=self._buildHeaders(config), params=params, timeout=aiohttp.ClientTimeout(total=60)) as resp: + if resp.status != 200: + body = await resp.text() + logger.warning("Bexio /accounting/journal HTTP %s offset=%s: %s", resp.status, offset, body[:200]) + break + page = await resp.json() + except Exception as ex: + logger.warning("Bexio /accounting/journal request failed offset=%s: %s", offset, ex) + break + if not isinstance(page, list) or not page: + break + rows.extend(page) + if len(page) < _JOURNAL_PAGE_SIZE: + break + offset += _JOURNAL_PAGE_SIZE + return rows diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py index 9e372099..98634127 100644 --- a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py +++ b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py @@ -9,6 +9,7 @@ Base URL: https://service.runmyaccounts.com/api/latest/clients/{clientName}/ """ import asyncio +import calendar import json import logging import re @@ -21,6 +22,7 @@ from ..accountingConnectorBase import ( BaseAccountingConnector, AccountingBooking, AccountingChart, + AccountingPeriodBalance, ConnectorConfigField, SyncResult, ) @@ -31,6 +33,73 @@ logger = logging.getLogger(__name__) _DEFAULT_API_BASE_URL = "https://service.runmyaccounts.com/api/latest/clients/" +def _formatLastDayOfMonth(year: int, month: int) -> str: + """Return ``YYYY-MM-DD`` of the last day of a calendar month.""" + lastDay = calendar.monthrange(year, month)[1] + return f"{year:04d}-{month:02d}-{lastDay:02d}" + + +def _isIncomeStatementAccount(accountNumber: str) -> bool: + """Decide whether an account is part of the income statement (Erfolgsrechnung). + + Swiss KMU-Kontenrahmen: 1xxx Aktiven, 2xxx Passiven (incl. 28xx + Eigenkapital) -> balance sheet; 3xxx..9xxx -> income statement. + Used by the RMA connector to choose between the two `/gl/saldo` query + variants (with vs. without ``from`` parameter). + """ + a = (accountNumber or "").strip() + if not a or not a[0].isdigit(): + return False + return a[0] not in ("1", "2") + + +def _parseSaldoBody(body: str) -> List[tuple]: + """Parse the response body of ``GET /gl/saldo`` (JSON or XML). + + Returns a list of ``(accountNumber, saldo)`` tuples. The endpoint + delivers ``{"row": [{"column": [accno, label, saldo]}, ...]}`` (JSON) or + ``accnolabelsaldo...`` + (XML). Rows that cannot be parsed are silently skipped to keep one bad row + from poisoning the whole sync. + """ + if not body or not body.strip(): + return [] + rows: List[tuple] = [] + try: + data = json.loads(body) + items = data.get("row") if isinstance(data, dict) else data + if isinstance(items, dict): + items = [items] + if isinstance(items, list): + for item in items: + if not isinstance(item, dict): + continue + cols = item.get("column") or [] + if isinstance(cols, list) and len(cols) >= 3: + accno = str(cols[0]).strip() + try: + saldo = float(cols[2]) + except (TypeError, ValueError): + continue + if accno: + rows.append((accno, saldo)) + return rows + except (json.JSONDecodeError, ValueError): + pass + rowMatches = re.findall(r"(.*?)", body, re.DOTALL) + for raw in rowMatches: + cols = re.findall(r"([^<]*)", raw) + if len(cols) >= 3: + accno = cols[0].strip() + try: + saldo = float(cols[2]) + except (TypeError, ValueError): + continue + if accno: + rows.append((accno, saldo)) + return rows + + class AccountingConnectorRma(BaseAccountingConnector): def getConnectorType(self) -> str: @@ -447,6 +516,191 @@ class AccountingConnectorRma(BaseAccountingConnector): logger.error(f"RMA getJournalEntries error: {e}", exc_info=True) return [] + async def getAccountBalances( + self, + config: Dict[str, Any], + years: List[int], + accountNumbers: Optional[List[str]] = None, + ) -> List[AccountingPeriodBalance]: + """Fetch authoritative closing balances per account and period via RMA's + ``GET /gl/saldo`` endpoint. + + For each requested year we issue 13 API calls (one per month-end + one + for the prior fiscal year-end as opening reference). The endpoint + returns the cumulative balance per account at the requested ``to`` date, + already including prior-year carry-over and yearend bookings -- which + is exactly the value the local journal-line aggregation cannot + reconstruct when the import window covers only part of the history. + + ``accno`` is mandatory; we use a digit-length-grouped wildcard + (``xxxx`` matches all 4-digit accounts, ``xxxxx`` all 5-digit, etc.) + derived from the chart of accounts, so 1-2 calls cover every account + per period. + """ + if not years: + return [] + + accountNumbersSet: Optional[set] = set(accountNumbers) if accountNumbers else None + wildcardPatterns = await self._resolveWildcardPatterns(config) + if not wildcardPatterns: + logger.warning("RMA getAccountBalances: chart of accounts is empty, no wildcards derivable") + return [] + + results: List[AccountingPeriodBalance] = [] + sortedYears = sorted({int(y) for y in years if y}) + + for year in sortedYears: + priorYearEnd = f"{year - 1}-12-31" + priorSaldosRaw = await self._fetchSaldoMapForDate(config, wildcardPatterns, priorYearEnd) + # ER (income statement) accounts reset to 0 at the start of each + # fiscal year -- prior-year YTD must NOT carry forward as opening. + priorSaldos = {a: (0.0 if _isIncomeStatementAccount(a) else v) for a, v in priorSaldosRaw.items()} + + runningOpening: Dict[str, float] = dict(priorSaldos) + decSaldos: Dict[str, float] = {} + + for month in range(1, 13): + lastDay = _formatLastDayOfMonth(year, month) + saldos = await self._fetchSaldoMapForDate(config, wildcardPatterns, lastDay) + + accountKeys = set(saldos.keys()) | set(runningOpening.keys()) + for accno in accountKeys: + if accountNumbersSet is not None and accno not in accountNumbersSet: + continue + closing = saldos.get(accno, runningOpening.get(accno, 0.0)) + opening = runningOpening.get(accno, 0.0) + results.append(AccountingPeriodBalance( + accountNumber=accno, + periodYear=year, + periodMonth=month, + openingBalance=round(opening, 2), + closingBalance=round(closing, 2), + currency="CHF", + asOfDate=lastDay, + )) + runningOpening = {**runningOpening, **saldos} + if month == 12: + decSaldos = dict(saldos) + + annualKeys = set(decSaldos.keys()) | set(priorSaldos.keys()) + for accno in annualKeys: + if accountNumbersSet is not None and accno not in accountNumbersSet: + continue + closing = decSaldos.get(accno, priorSaldos.get(accno, 0.0)) + opening = priorSaldos.get(accno, 0.0) + results.append(AccountingPeriodBalance( + accountNumber=accno, + periodYear=year, + periodMonth=0, + openingBalance=round(opening, 2), + closingBalance=round(closing, 2), + currency="CHF", + asOfDate=f"{year}-12-31", + )) + + logger.info( + "RMA getAccountBalances: %s rows for years=%s, wildcards=%s", + len(results), sortedYears, wildcardPatterns, + ) + return results + + async def _resolveWildcardPatterns(self, config: Dict[str, Any]) -> List[str]: + """Derive `accno` wildcard patterns from the chart of accounts. + + RMA's `/gl/saldo` requires `accno`; using digit-length-grouped + wildcards (`xxxx`, `xxxxx`, ...) lets us cover every account in 1-2 + calls per period instead of one call per account number. + """ + try: + charts = await self.getChartOfAccounts(config) + except Exception as ex: + logger.warning("RMA _resolveWildcardPatterns: getChartOfAccounts failed: %s", ex) + return [] + lengths = set() + for c in charts: + accno = (c.accountNumber or "").strip() + if accno.isdigit(): + lengths.add(len(accno)) + return [("x" * n) for n in sorted(lengths)] + + async def _fetchSaldoMapForDate( + self, + config: Dict[str, Any], + wildcardPatterns: List[str], + toDate: str, + ) -> Dict[str, float]: + """Call `/gl/saldo` and return ``{accountNumber: cumulativeSaldo}``. + + Per RMA docs ("Warning: Chart of the balance sheet do not need a from + date. Charts of the income statement need from and to parameter."), + we issue **two** calls per pattern: + + * No ``from`` -> correct cumulative saldo for balance-sheet accounts + (1xxx, 2xxx in Swiss KMU-Kontenrahmen). + * ``from=YYYY-01-01`` (year of ``toDate``) -> correct YTD result for + income-statement accounts (3xxx..9xxx, which reset annually). + + Per account number we keep the value from the appropriate call. + Empty / failed responses are logged at DEBUG and skipped to avoid + aborting the whole sync. + """ + yearStart = f"{toDate[:4]}-01-01" + bsRows: Dict[str, float] = {} + erRows: Dict[str, float] = {} + for pattern in wildcardPatterns: + try: + bs = await self._fetchSaldoRows(config, accno=pattern, fromDate=None, toDate=toDate) + except Exception as ex: + logger.debug("RMA _fetchSaldoMapForDate(BS, pattern=%s, to=%s) failed: %s", pattern, toDate, ex) + bs = [] + try: + er = await self._fetchSaldoRows(config, accno=pattern, fromDate=yearStart, toDate=toDate) + except Exception as ex: + logger.debug("RMA _fetchSaldoMapForDate(ER, pattern=%s, %s..%s) failed: %s", pattern, yearStart, toDate, ex) + er = [] + for accno, saldo in bs: + bsRows[accno] = saldo + for accno, saldo in er: + erRows[accno] = saldo + + merged: Dict[str, float] = {} + for accno in set(bsRows) | set(erRows): + if _isIncomeStatementAccount(accno): + merged[accno] = erRows.get(accno, bsRows.get(accno, 0.0)) + else: + merged[accno] = bsRows.get(accno, erRows.get(accno, 0.0)) + return merged + + async def _fetchSaldoRows( + self, + config: Dict[str, Any], + accno: str, + fromDate: Optional[str], + toDate: str, + ) -> List[tuple]: + """Single `/gl/saldo` call. Returns list of ``(accountNumber, saldo)`` tuples.""" + url = self._buildUrl(config, "gl/saldo") + params: Dict[str, str] = { + "accno": accno, + "to": toDate, + "bookkeeping_main_curr": "true", + } + if fromDate: + params["from"] = fromDate + async with aiohttp.ClientSession() as session: + async with session.get( + url, + headers=self._buildHeaders(config), + params=params, + timeout=aiohttp.ClientTimeout(total=20), + ) as resp: + if resp.status != 200: + body = await resp.text() + logger.debug("RMA /gl/saldo accno=%s from=%s to=%s -> HTTP %s: %s", accno, fromDate, toDate, resp.status, body[:200]) + return [] + body = await resp.text() + return _parseSaldoBody(body) + async def _fetchGlBulk(self, config: Dict[str, Any], params: Dict[str, str]) -> List[Dict[str, Any]]: """Try GET /gl to fetch journal entries in bulk (not all RMA versions support this).""" try: diff --git a/modules/features/trustee/interfaceFeatureTrustee.py b/modules/features/trustee/interfaceFeatureTrustee.py index b1a6aab6..9f1c911a 100644 --- a/modules/features/trustee/interfaceFeatureTrustee.py +++ b/modules/features/trustee/interfaceFeatureTrustee.py @@ -1109,10 +1109,15 @@ class TrusteeObjects: ) def _cleanDocumentRecords(records): - return [ - TrusteeDocument(**{k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"}) - for r in records - ] + cleaned = [] + for r in records: + labelCols = {k: v for k, v in r.items() if k.endswith("Label")} + filteredFields = {k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"} + doc = TrusteeDocument(**filteredFields) + d = doc.model_dump() + d.update(labelCols) + cleaned.append(d) + return cleaned if isinstance(result, PaginatedResult): result.items = _cleanDocumentRecords(result.items) @@ -1133,10 +1138,15 @@ class TrusteeObjects: ) def _cleanDocumentRecords(records): - return [ - TrusteeDocument(**{k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"}) - for r in records - ] + cleaned = [] + for r in records: + labelCols = {k: v for k, v in r.items() if k.endswith("Label")} + filteredFields = {k: v for k, v in r.items() if not k.startswith("_") and k != "documentData"} + doc = TrusteeDocument(**filteredFields) + d = doc.model_dump() + d.update(labelCols) + cleaned.append(d) + return cleaned if isinstance(result, PaginatedResult): result.items = _cleanDocumentRecords(result.items) @@ -1297,10 +1307,13 @@ class TrusteeObjects: def _cleanAndValidate(records): items = [] for record in records: + labelCols = {k: v for k, v in record.items() if k.endswith("Label")} cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_") or k in keepFields} position = self._toTrusteePositionOrDelete(cleanedRecord, deleteCorrupt=True) if position is not None: - items.append(position) + d = position.model_dump() + d.update(labelCols) + items.append(d) return items if isinstance(result, PaginatedResult): diff --git a/modules/features/trustee/mainTrustee.py b/modules/features/trustee/mainTrustee.py index 0799fa1c..020aeda5 100644 --- a/modules/features/trustee/mainTrustee.py +++ b/modules/features/trustee/mainTrustee.py @@ -394,9 +394,15 @@ TEMPLATE_WORKFLOWS = [ {"id": "extract", "type": "trustee.extractFromFiles", "label": "Dokumente extrahieren", "_method": "trustee", "_action": "extractFromFiles", "parameters": {"featureInstanceId": "{{featureInstanceId}}", "prompt": ""}, "position": {"x": 250, "y": 0}}, {"id": "process", "type": "trustee.processDocuments", "label": "Verarbeiten", "_method": "trustee", "_action": "processDocuments", - "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 500, "y": 0}}, + "parameters": { + "documentList": {"type": "ref", "nodeId": "extract", "path": ["documents"]}, + "featureInstanceId": "{{featureInstanceId}}", + }, "position": {"x": 500, "y": 0}}, {"id": "sync", "type": "trustee.syncToAccounting", "label": "Synchronisieren", "_method": "trustee", "_action": "syncToAccounting", - "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 750, "y": 0}}, + "parameters": { + "documentList": {"type": "ref", "nodeId": "process", "path": ["documents"]}, + "featureInstanceId": "{{featureInstanceId}}", + }, "position": {"x": 750, "y": 0}}, ], "connections": [ {"source": "trigger", "sourcePort": 0, "target": "extract", "targetPort": 0}, diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py index fbdd0966..021251fc 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -412,34 +412,41 @@ def get_position_options( # ===== Organisation Routes ===== -@router.get("/{instanceId}/organisations", response_model=PaginatedResponse[TrusteeOrganisation]) +@router.get("/{instanceId}/organisations") @limiter.limit("30/minute") def get_organisations( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteeOrganisation]: +): """Get all organisations for a feature instance with optional pagination.""" + from modules.routes.routeHelpers import enrichRowsWithFkLabels mandateId = _validateInstanceAccess(instanceId, context) paginationParams = _parsePagination(pagination) interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllOrganisations(paginationParams) + def _toDicts(items): + return [r.model_dump() if hasattr(r, "model_dump") else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeOrganisation) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeOrganisation) + return {"items": enriched, "pagination": None} @router.get("/{instanceId}/organisations/{orgId}", response_model=TrusteeOrganisation) @@ -525,34 +532,41 @@ def delete_organisation( # ===== Role Routes ===== -@router.get("/{instanceId}/roles", response_model=PaginatedResponse[TrusteeRole]) +@router.get("/{instanceId}/roles") @limiter.limit("30/minute") def get_roles( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), pagination: Optional[str] = Query(None), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteeRole]: +): """Get all roles with optional pagination.""" + from modules.routes.routeHelpers import enrichRowsWithFkLabels mandateId = _validateInstanceAccess(instanceId, context) paginationParams = _parsePagination(pagination) interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllRoles(paginationParams) + def _toDicts(items): + return [r.model_dump() if hasattr(r, "model_dump") else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeRole) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeRole) + return {"items": enriched, "pagination": None} @router.get("/{instanceId}/roles/{roleId}", response_model=TrusteeRole) @@ -638,34 +652,41 @@ def delete_role( # ===== Access Routes ===== -@router.get("/{instanceId}/access", response_model=PaginatedResponse[TrusteeAccess]) +@router.get("/{instanceId}/access") @limiter.limit("30/minute") def get_all_access( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), pagination: Optional[str] = Query(None), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteeAccess]: +): """Get all access records with optional pagination.""" + from modules.routes.routeHelpers import enrichRowsWithFkLabels mandateId = _validateInstanceAccess(instanceId, context) paginationParams = _parsePagination(pagination) interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllAccess(paginationParams) + def _toDicts(items): + return [r.model_dump() if hasattr(r, "model_dump") else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeAccess) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeAccess) + return {"items": enriched, "pagination": None} @router.get("/{instanceId}/access/{accessId}", response_model=TrusteeAccess) @@ -781,34 +802,41 @@ def delete_access( # ===== Contract Routes ===== -@router.get("/{instanceId}/contracts", response_model=PaginatedResponse[TrusteeContract]) +@router.get("/{instanceId}/contracts") @limiter.limit("30/minute") def get_contracts( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), pagination: Optional[str] = Query(None), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteeContract]: +): """Get all contracts with optional pagination.""" + from modules.routes.routeHelpers import enrichRowsWithFkLabels mandateId = _validateInstanceAccess(instanceId, context) paginationParams = _parsePagination(pagination) interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllContracts(paginationParams) + def _toDicts(items): + return [r.model_dump() if hasattr(r, "model_dump") else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_toDicts(result.items), TrusteeContract) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + enriched = enrichRowsWithFkLabels(_toDicts(items), TrusteeContract) + return {"items": enriched, "pagination": None} @router.get("/{instanceId}/contracts/{contractId}", response_model=TrusteeContract) @@ -909,7 +937,7 @@ def delete_contract( # ===== Document Routes ===== -@router.get("/{instanceId}/documents", response_model=PaginatedResponse[TrusteeDocument]) +@router.get("/{instanceId}/documents") @limiter.limit("30/minute") def get_documents( request: Request, @@ -918,7 +946,7 @@ def get_documents( mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteeDocument]: +): """Get all documents (metadata only) with optional pagination.""" mandateId = _validateInstanceAccess(instanceId, context) @@ -929,19 +957,23 @@ def get_documents( interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllDocuments(paginationParams) + def _itemsToDicts(items): + return [r.model_dump() if hasattr(r, 'model_dump') else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + return { + "items": _itemsToDicts(result.items), + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + return {"items": _itemsToDicts(items), "pagination": None} def _handleDocumentMode(instanceId, mandateId, mode, column, pagination, context): @@ -1154,7 +1186,7 @@ def delete_document( # ===== Position Routes ===== -@router.get("/{instanceId}/positions", response_model=PaginatedResponse[TrusteePosition]) +@router.get("/{instanceId}/positions") @limiter.limit("30/minute") def get_positions( request: Request, @@ -1163,7 +1195,7 @@ def get_positions( mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[TrusteePosition]: +): """Get all positions with optional pagination.""" mandateId = _validateInstanceAccess(instanceId, context) @@ -1174,19 +1206,23 @@ def get_positions( interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllPositions(paginationParams) + def _itemsToDicts(items): + return [r.model_dump() if hasattr(r, 'model_dump') else r for r in items] + if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + return { + "items": _itemsToDicts(result.items), + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None - ) - ) - return PaginatedResponse(items=result if isinstance(result, list) else result.items, pagination=None) + ).model_dump(), + } + items = result if isinstance(result, list) else result.items + return {"items": _itemsToDicts(items), "pagination": None} def _handlePositionMode(instanceId, mandateId, mode, column, pagination, context): @@ -1347,8 +1383,8 @@ def get_available_accounting_connectors( ) -> List[Dict[str, Any]]: """List all available accounting system connectors with their config fields.""" _validateInstanceAccess(instanceId, context) - from .accounting.accountingRegistry import _getAccountingRegistry - return _getAccountingRegistry().getAvailableConnectors() + from .accounting.accountingRegistry import getAccountingRegistry + return getAccountingRegistry().getAvailableConnectors() # Placeholder returned for secret config fields so frontend can prefill form without sending real secrets. @@ -1357,8 +1393,8 @@ _CONFIG_PLACEHOLDER = "***" def _getConfigMasked(connectorType: str, plainConfig: Dict[str, Any]) -> Dict[str, str]: """Build config with secret values replaced by placeholder for GET response.""" - from .accounting.accountingRegistry import _getAccountingRegistry - connector = _getAccountingRegistry().getConnector(connectorType) + from .accounting.accountingRegistry import getAccountingRegistry + connector = getAccountingRegistry().getConnector(connectorType) if not connector: return {k: (v if isinstance(v, str) else str(v)) for k, v in (plainConfig or {}).items()} secretKeys = {f.key for f in connector.getRequiredConfigFields() if f.secret} @@ -2081,13 +2117,13 @@ def _serializeRoleForApi(role) -> Dict[str, Any]: return payload -@router.get("/{instanceId}/instance-roles", response_model=PaginatedResponse) +@router.get("/{instanceId}/instance-roles") @limiter.limit("30/minute") def get_instance_roles( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse: +): """ Get all roles for this feature instance. Requires feature admin permission. @@ -2095,14 +2131,9 @@ def get_instance_roles( mandateId = _validateInstanceAdmin(instanceId, context) rootInterface = getRootInterface() - - # Get instance-specific roles (Pydantic models) roles = rootInterface.getRolesByFeatureCode("trustee", featureInstanceId=instanceId) - return PaginatedResponse( - items=[_serializeRoleForApi(r) for r in roles], - pagination=None - ) + return {"items": [_serializeRoleForApi(r) for r in roles], "pagination": None} @router.get("/{instanceId}/instance-roles/{roleId}", response_model=Dict[str, Any]) @@ -2129,14 +2160,14 @@ def get_instance_role( return _serializeRoleForApi(role) -@router.get("/{instanceId}/instance-roles/{roleId}/rules", response_model=PaginatedResponse) +@router.get("/{instanceId}/instance-roles/{roleId}/rules") @limiter.limit("30/minute") def get_instance_role_rules( request: Request, instanceId: str = Path(..., description="Feature Instance ID"), roleId: str = Path(..., description="Role ID"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse: +): """ Get all AccessRules for a specific instance role. Requires feature admin permission. @@ -2145,18 +2176,13 @@ def get_instance_role_rules( rootInterface = getRootInterface() - # Verify role belongs to this instance (Pydantic model) role = rootInterface.getRole(roleId) if not role or str(role.featureInstanceId) != instanceId: raise HTTPException(status_code=404, detail=f"Role {roleId} not found in this instance") - # Get AccessRules for this role (Pydantic models) rules = rootInterface.getAccessRulesByRole(roleId) - return PaginatedResponse( - items=[r.model_dump() for r in rules], - pagination=None - ) + return {"items": [r.model_dump() for r in rules], "pagination": None} @router.post("/{instanceId}/instance-roles/{roleId}/rules", response_model=Dict[str, Any], status_code=201) @@ -2336,6 +2362,7 @@ def _paginatedReadEndpoint( handleFilterValuesInMemory, handleIdsInMemory, parseCrossFilterPagination, + enrichRowsWithFkLabels, ) from fastapi.responses import JSONResponse @@ -2401,23 +2428,28 @@ def _paginatedReadEndpoint( featureCode=interface.FEATURE_CODE, ) + def _itemsToDicts(rawItems): + return [r.model_dump() if hasattr(r, "model_dump") else r for r in rawItems] + if paginationParams and hasattr(result, "items"): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_itemsToDicts(result.items), modelClass) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort if paginationParams else [], filters=paginationParams.filters if paginationParams else None, - ), - ) + ).model_dump(), + } items = result.items if hasattr(result, "items") else result - return PaginatedResponse(items=items, pagination=None) + enriched = enrichRowsWithFkLabels(_itemsToDicts(items), modelClass) + return {"items": enriched, "pagination": None} -@router.get("/{instanceId}/data/accounts", response_model=PaginatedResponse[TrusteeDataAccount]) +@router.get("/{instanceId}/data/accounts") @limiter.limit("30/minute") def get_data_accounts( request: Request, @@ -2438,7 +2470,7 @@ def get_data_accounts( ) -@router.get("/{instanceId}/data/journal-entries", response_model=PaginatedResponse[TrusteeDataJournalEntry]) +@router.get("/{instanceId}/data/journal-entries") @limiter.limit("30/minute") def get_data_journal_entries( request: Request, @@ -2459,7 +2491,7 @@ def get_data_journal_entries( ) -@router.get("/{instanceId}/data/journal-lines", response_model=PaginatedResponse[TrusteeDataJournalLine]) +@router.get("/{instanceId}/data/journal-lines") @limiter.limit("30/minute") def get_data_journal_lines( request: Request, @@ -2480,7 +2512,7 @@ def get_data_journal_lines( ) -@router.get("/{instanceId}/data/contacts", response_model=PaginatedResponse[TrusteeDataContact]) +@router.get("/{instanceId}/data/contacts") @limiter.limit("30/minute") def get_data_contacts( request: Request, @@ -2501,7 +2533,7 @@ def get_data_contacts( ) -@router.get("/{instanceId}/data/account-balances", response_model=PaginatedResponse[TrusteeDataAccountBalance]) +@router.get("/{instanceId}/data/account-balances") @limiter.limit("30/minute") def get_data_account_balances( request: Request, @@ -2522,7 +2554,7 @@ def get_data_account_balances( ) -@router.get("/{instanceId}/accounting/configs", response_model=PaginatedResponse[TrusteeAccountingConfig]) +@router.get("/{instanceId}/accounting/configs") @limiter.limit("30/minute") def get_accounting_configs( request: Request, @@ -2548,7 +2580,7 @@ def get_accounting_configs( ) -@router.get("/{instanceId}/accounting/syncs", response_model=PaginatedResponse[TrusteeAccountingSync]) +@router.get("/{instanceId}/accounting/syncs") @limiter.limit("30/minute") def get_accounting_syncs( request: Request, diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 1c44d54d..96313293 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -191,7 +191,7 @@ _SOURCE_TYPE_TO_SERVICE = { } -def _buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str: +def buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str: """Build a description of active data sources for the agent prompt.""" parts = [ "The user has attached the following external data sources to this prompt.", @@ -229,7 +229,7 @@ def _buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str: return "\n".join(parts) if found else "" -def _buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str: +def buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str: """Build a description of attached feature data sources for the agent prompt.""" from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource from modules.security.rbacCatalog import getCatalogService @@ -735,12 +735,12 @@ async def _runWorkspaceAgent( enrichedPrompt = prompt if dataSourceIds: - dsInfo = _buildDataSourceContext(chatService, dataSourceIds) + dsInfo = buildDataSourceContext(chatService, dataSourceIds) if dsInfo: enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}" if featureDataSourceIds: - fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds) + fdsInfo = buildFeatureDataSourceContext(featureDataSourceIds) if fdsInfo: enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}" diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 3e8bf4ea..a6ae0052 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -139,7 +139,7 @@ def _bootstrapBilling() -> None: Idempotent: only creates missing settings/accounts. """ try: - from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface billingInterface = getBillingRootInterface() @@ -1968,11 +1968,11 @@ def initRootMandateBilling(mandateId: str) -> None: Creates mandate pool account and user audit accounts. """ try: - from modules.interfaces.interfaceDbBilling import _getRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface from modules.interfaces.interfaceDbApp import getRootInterface as getAppRootInterface from modules.datamodels.datamodelBilling import BillingSettings - billingInterface = _getRootInterface() + billingInterface = getBillingRootInterface() appInterface = getAppRootInterface() existingSettings = billingInterface.getSettings(mandateId) @@ -2012,7 +2012,7 @@ def _initRootMandateSubscription(mandateId: str) -> None: Called during bootstrap after billing init. """ try: - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface from modules.datamodels.datamodelSubscription import ( MandateSubscription, SubscriptionStatusEnum, diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index c754684f..d1593473 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -15,7 +15,7 @@ from typing import Dict, Any, List, Optional, Union from passlib.context import CryptContext import uuid -from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector +from modules.connectors.connectorDbPostgre import DatabaseConnector, getCachedConnector from modules.shared.configuration import APP_CONFIG from modules.shared.dbRegistry import registerDatabase from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp @@ -143,7 +143,7 @@ class AppObjects: dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) - self.db = _get_cached_connector( + self.db = getCachedConnector( dbHost=dbHost, dbDatabase=dbDatabase, dbUser=dbUser, @@ -1594,8 +1594,8 @@ class AppObjects: if not adminRoleId: raise ValueError(f"No admin role found for mandate {mandateId} — cannot assign user without role") - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot - from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRoot from datetime import datetime, timezone, timedelta now = datetime.now(timezone.utc) @@ -1693,7 +1693,7 @@ class AppObjects: from modules.datamodels.datamodelSubscription import ( SubscriptionStatusEnum, BUILTIN_PLANS, ) - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot from datetime import datetime, timezone, timedelta activated = 0 @@ -1936,7 +1936,7 @@ class AppObjects: logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}") # 3. Cancel Stripe subscriptions + delete MandateSubscription records (poweron_billing) - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot subInterface = _getSubRoot() subs = subInterface.listForMandate(mandateId) for sub in subs: @@ -1954,7 +1954,7 @@ class AppObjects: logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}") # 3b. Delete Billing data (poweron_billing) - from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot + from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRoot billingDb = _getBillingRoot().db billingAccounts = billingDb.getRecordset(BillingAccount, recordFilter={"mandateId": mandateId}) for acc in billingAccounts: @@ -2202,7 +2202,7 @@ class AppObjects: Balance is always on the mandate pool (PREPAY_MANDATE). User accounts are for audit trail only. """ try: - from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface as getBillingRootInterface billingInterface = getBillingRootInterface() settings = billingInterface.getSettings(mandateId) diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index a4af7b25..db1ee619 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -134,7 +134,7 @@ def getInterface(currentUser: User, mandateId: str = None) -> "BillingObjects": return _billingInterfaces[cacheKey] -def _getRootInterface() -> "BillingObjects": +def getRootInterface() -> "BillingObjects": """Get interface with system access for bootstrap operations.""" from modules.security.rootAccess import getRootUser rootUser = getRootUser() @@ -888,7 +888,7 @@ class BillingObjects: prev = self._parseSettingsDateTime(settings.get("storagePeriodStartAt")) if prev is not None and abs((prev - periodStartAt).total_seconds()) < 2: return - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot usedMB = float(_getSubRoot().getMandateDataVolumeMB(mandateId)) self.updateSettings( @@ -911,13 +911,13 @@ class BillingObjects: settings = self.getSettings(mandateId) if not settings: return None - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot - from modules.datamodels.datamodelSubscription import _getPlan + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot + from modules.datamodels.datamodelSubscription import getPlan subIface = _getSubRoot() usedMB = float(subIface.getMandateDataVolumeMB(mandateId)) sub = subIface.getOperativeForMandate(mandateId) - plan = _getPlan(sub.get("planKey", "")) if sub else None + plan = getPlan(sub.get("planKey", "")) if sub else None includedMB = plan.maxDataVolumeMB if plan and plan.maxDataVolumeMB is not None else None if includedMB is None: return None @@ -971,13 +971,13 @@ class BillingObjects: Amount = budgetAiPerUserCHF * activeUsers (dynamic, not the static plan.budgetAiCHF). Should be called once per billing period (initial activation + each invoice.paid). Returns the created CREDIT transaction or None if budget is 0.""" - from modules.datamodels.datamodelSubscription import _getPlan + from modules.datamodels.datamodelSubscription import getPlan - plan = _getPlan(planKey) + plan = getPlan(planKey) if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0: return None - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot subRoot = _getSubRoot() activeUsers = max(subRoot.countActiveUsers(mandateId), 1) amount = plan.budgetAiPerUserCHF * activeUsers @@ -1027,13 +1027,13 @@ class BillingObjects: delta > 0: user added -> CREDIT pro-rata portion delta < 0: user removed -> DEBIT pro-rata portion """ - from modules.datamodels.datamodelSubscription import _getPlan + from modules.datamodels.datamodelSubscription import getPlan - plan = _getPlan(planKey) + plan = getPlan(planKey) if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0: return None - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot subRoot = _getSubRoot() operative = subRoot.getOperativeForMandate(mandateId) if not operative: @@ -1221,7 +1221,7 @@ class BillingObjects: if not mandate or not getattr(mandate, "enabled", True): continue - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})" settings = self.getSettings(mandateId) if not settings: @@ -1280,13 +1280,12 @@ class BillingObjects: if not userAccount: continue - # Get transactions for user's account (all transactions are on user accounts now) transactions = self.getTransactions(userAccount["id"], limit=limit) mandate = appInterface.getMandate(mandateId) - mandateName = "" + mandateName = f"NA({mandateId})" if mandate: - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})" for t in transactions: t["mandateId"] = mandateId @@ -1333,9 +1332,9 @@ class BillingObjects: continue mandate = appInterface.getMandate(mandateId) - mandateName = "" + mandateName = f"NA({mandateId})" if mandate: - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})" allMandateAccounts = self.db.getRecordset( BillingAccount, @@ -1387,11 +1386,10 @@ class BillingObjects: for mandateId in targetMandateIds: transactions = self.getTransactionsByMandate(mandateId, limit=limit) - # Get mandate name mandate = appInterface.getMandate(mandateId) - mandateName = "" + mandateName = f"NA({mandateId})" if mandate: - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) or f"NA({mandateId})" for t in transactions: t["mandateId"] = mandateId @@ -1439,7 +1437,6 @@ class BillingObjects: for s in allSettings: settingsMap[s.get("mandateId")] = s - # Get user info efficiently userIds = list(set(acc.get("userId") for acc in allAccounts if acc.get("userId"))) userMap = {} for userId in userIds: @@ -1447,16 +1444,15 @@ class BillingObjects: if user: displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None) username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None) - userMap[userId] = displayName or username or userId + userMap[userId] = displayName or username or f"NA({userId})" - # Get mandate info efficiently mandateMap = {} mandateIdList = list(set(acc.get("mandateId") for acc in allAccounts if acc.get("mandateId"))) for mandateId in mandateIdList: mandate = appInterface.getMandate(mandateId) if mandate: - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") - mandateMap[mandateId] = mandateName + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) + mandateMap[mandateId] = mandateName or f"NA({mandateId})" for account in allAccounts: mandateId = account.get("mandateId") @@ -1475,9 +1471,9 @@ class BillingObjects: balances.append({ "accountId": account.get("id"), "mandateId": mandateId, - "mandateName": mandateMap.get(mandateId, ""), + "mandateName": mandateMap.get(mandateId) or (f"NA({mandateId})" if mandateId else None), "userId": userId, - "userName": userMap.get(userId, userId), + "userName": userMap.get(userId) or (f"NA({userId})" if userId else None), "balance": balance, "warningThreshold": warningThreshold, "isWarning": balance <= warningThreshold, @@ -1596,14 +1592,14 @@ class BillingObjects: if pageUserIds: users = appInterface.getUsersByIds(list(pageUserIds)) for uid, u in users.items(): - dn = getattr(u, "displayName", None) or getattr(u, "username", None) or uid + dn = getattr(u, "displayName", None) or getattr(u, "username", None) or f"NA({uid})" userMap[uid] = dn mandateMap: Dict[str, str] = {} if pageMandateIds: mandates = appInterface.getMandatesByIds(list(pageMandateIds)) for mid, m in mandates.items(): - mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or mid + mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or f"NA({mid})" enriched = [] for t in pageItems: @@ -1613,9 +1609,9 @@ class BillingObjects: mid = acc.get("mandateId") txUserId = row.get("createdByUserId") or acc.get("userId") row["mandateId"] = mid - row["mandateName"] = mandateMap.get(mid, "") + row["mandateName"] = mandateMap.get(mid) or (f"NA({mid})" if mid else None) row["userId"] = txUserId - row["userName"] = userMap.get(txUserId, txUserId) if txUserId else None + row["userName"] = userMap.get(txUserId) or (f"NA({txUserId})" if txUserId else None) enriched.append(row) return PaginatedResult(items=enriched, totalItems=totalItems, totalPages=totalPages) @@ -1639,12 +1635,12 @@ class BillingObjects: first, then builds a single SQL query with OR-combined conditions. """ import math - from modules.connectors.connectorDbPostgre import _get_model_fields, _parseRecordFields + from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields from modules.datamodels.datamodelUam import UserInDB from modules.interfaces.interfaceDbApp import getInterface as getAppInterface table = BillingTransaction.__name__ - fields = _get_model_fields(BillingTransaction) + fields = getModelFields(BillingTransaction) pattern = f"%{searchTerm}%" # Resolve matching user / mandate IDs via the app DB (which is separate @@ -1785,7 +1781,7 @@ class BillingObjects: records = [dict(row) for row in cur.fetchall()] for rec in records: - _parseRecordFields(rec, fields, f"search table {table}") + parseRecordFields(rec, fields, f"search table {table}") totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 return {"items": records, "totalItems": totalItems, "totalPages": totalPages} @@ -2023,7 +2019,7 @@ class BillingObjects: appInterface = getAppInterface(self.currentUser) mandates = appInterface.getMandatesByIds(mandateIds) return sorted( - {getattr(m, "label", None) or getattr(m, "name", "") or mid for mid, m in mandates.items()}, + {getattr(m, "label", None) or getattr(m, "name", None) or f"NA({mid})" for mid, m in mandates.items()}, key=lambda v: v.lower(), ) @@ -2035,7 +2031,7 @@ class BillingObjects: appInterface = getAppInterface(self.currentUser) users = appInterface.getUsersByIds(values) return sorted( - {getattr(u, "displayName", None) or getattr(u, "username", None) or uid for uid, u in users.items()}, + {getattr(u, "displayName", None) or getattr(u, "username", None) or f"NA({uid})" for uid, u in users.items()}, key=lambda v: v.lower(), ) @@ -2075,7 +2071,6 @@ class BillingObjects: "userId": acc.get("userId") } - # Get user info efficiently userIds = list(set(acc.get("userId") for acc in allAccounts if acc.get("userId"))) userMap = {} for userId in userIds: @@ -2083,16 +2078,15 @@ class BillingObjects: if user: displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None) username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None) - userMap[userId] = displayName or username or userId + userMap[userId] = displayName or username or f"NA({userId})" - # Get mandate info efficiently mandateMap = {} mandateIdList = list(set(acc.get("mandateId") for acc in allAccounts if acc.get("mandateId"))) for mandateId in mandateIdList: mandate = appInterface.getMandate(mandateId) if mandate: - mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") - mandateMap[mandateId] = mandateName + mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", None) if isinstance(mandate, dict) else None) + mandateMap[mandateId] = mandateName or f"NA({mandateId})" # Get transactions for all accounts and collect createdByUserIds rawTransactions = [] @@ -2123,18 +2117,16 @@ class BillingObjects: if user: displayName = getattr(user, 'displayName', None) or (user.get("displayName") if isinstance(user, dict) else None) username = getattr(user, 'username', None) or (user.get("username") if isinstance(user, dict) else None) - userMap[uid] = displayName or username or uid + userMap[uid] = displayName or username or f"NA({uid})" - # Enrich transactions for t in rawTransactions: mandateId = t.pop("_accountMandateId", None) accountUserId = t.pop("_accountUserId", None) t["mandateId"] = mandateId - t["mandateName"] = mandateMap.get(mandateId, "") - # Prefer createdByUserId (per-transaction) over account-derived userId + t["mandateName"] = mandateMap.get(mandateId) or (f"NA({mandateId})" if mandateId else None) txUserId = t.get("createdByUserId") or accountUserId t["userId"] = txUserId - t["userName"] = userMap.get(txUserId, txUserId) if txUserId else None + t["userName"] = userMap.get(txUserId) or (f"NA({txUserId})" if txUserId else None) allTransactions.append(t) except Exception as e: diff --git a/modules/interfaces/interfaceDbChat.py b/modules/interfaces/interfaceDbChat.py index be097263..1b7ec59a 100644 --- a/modules/interfaces/interfaceDbChat.py +++ b/modules/interfaces/interfaceDbChat.py @@ -62,13 +62,13 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI try: import os from datetime import datetime, UTC - from modules.shared.debugLogger import _getBaseDebugDir, _ensureDir + from modules.shared.debugLogger import getBaseDebugDir, ensureDir from modules.interfaces.interfaceDbManagement import getInterface # Create base debug directory (use base debug dir, not prompts subdirectory) - baseDebugDir = _getBaseDebugDir() + baseDebugDir = getBaseDebugDir() debug_root = os.path.join(baseDebugDir, 'messages') - _ensureDir(debug_root) + ensureDir(debug_root) # Generate timestamp timestamp = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3] @@ -133,7 +133,7 @@ def storeDebugMessageAndDocuments(message, currentUser, mandateId=None, featureI safe_label = "default" label_folder = os.path.join(message_path, safe_label) - _ensureDir(label_folder) + ensureDir(label_folder) # Store each document for i, doc in enumerate(docs): diff --git a/modules/interfaces/interfaceDbKnowledge.py b/modules/interfaces/interfaceDbKnowledge.py index a12ac048..f819615e 100644 --- a/modules/interfaces/interfaceDbKnowledge.py +++ b/modules/interfaces/interfaceDbKnowledge.py @@ -11,7 +11,7 @@ from collections import defaultdict from datetime import datetime, timezone, timedelta from typing import Dict, Any, List, Optional -from modules.connectors.connectorDbPostgre import _get_cached_connector +from modules.connectors.connectorDbPostgre import getCachedConnector from modules.shared.dbRegistry import registerDatabase from modules.datamodels.datamodelKnowledge import FileContentIndex, ContentChunk, RoundMemory, WorkflowMemory from modules.datamodels.datamodelUam import User @@ -43,7 +43,7 @@ class KnowledgeObjects: dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) - self.db = _get_cached_connector( + self.db = getCachedConnector( dbHost=dbHost, dbDatabase=dbDatabase, dbUser=dbUser, @@ -103,9 +103,9 @@ class KnowledgeObjects: ok = self.db.recordDelete(FileContentIndex, fileId) if ok and mandateId: try: - from modules.interfaces.interfaceDbBilling import _getRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface - _getRootInterface().reconcileMandateStorageBilling(str(mandateId)) + getRootInterface().reconcileMandateStorageBilling(str(mandateId)) except Exception as ex: logger.warning("reconcileMandateStorageBilling after delete failed: %s", ex) return ok diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py index cca98ffa..e6cee0b8 100644 --- a/modules/interfaces/interfaceDbManagement.py +++ b/modules/interfaces/interfaceDbManagement.py @@ -13,7 +13,7 @@ import math import mimetypes from typing import Dict, Any, List, Optional, Union -from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector +from modules.connectors.connectorDbPostgre import DatabaseConnector, getCachedConnector from modules.shared.dbRegistry import registerDatabase from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, getRecordsetPaginatedWithRBAC from modules.security.rbac import RbacClass @@ -136,7 +136,7 @@ class ComponentObjects: dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) - self.db = _get_cached_connector( + self.db = getCachedConnector( dbHost=dbHost, dbDatabase=dbDatabase, dbUser=dbUser, @@ -992,8 +992,11 @@ class ComponentObjects: if file.get("neutralize") is None: file["neutralize"] = False + labelCols = {k: v for k, v in file.items() if k.endswith("Label")} fileItem = FileItem(**file) - fileItems.append(fileItem) + itemDict = fileItem.model_dump() + itemDict.update(labelCols) + fileItems.append(itemDict) except Exception as e: logger.warning(f"Skipping invalid file record: {str(e)}") continue @@ -1347,8 +1350,8 @@ class ComponentObjects: folderIds = [f["id"] for f in folders if f.get("id")] fileCounts: Dict[str, int] = {} try: - from modules.interfaces.interfaceRbac import _buildFilesScopeWhereClause - scopeClause = _buildFilesScopeWhereClause( + from modules.interfaces.interfaceRbac import buildFilesScopeWhereClause + scopeClause = buildFilesScopeWhereClause( self.currentUser, "FileItem", self.db, self.mandateId, self.featureInstanceId, [], [], diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py index 05d83a58..a09fe93f 100644 --- a/modules/interfaces/interfaceDbSubscription.py +++ b/modules/interfaces/interfaceDbSubscription.py @@ -25,7 +25,7 @@ from modules.datamodels.datamodelSubscription import ( TERMINAL_STATUSES, OPERATIVE_STATUSES, BUILTIN_PLANS, - _getPlan, + getPlan as getPlanFromCatalog, _getSelectablePlans, ) @@ -55,7 +55,7 @@ def getInterface(currentUser: User, mandateId: str = None) -> "SubscriptionObjec return _subscriptionInterfaces[cacheKey] -def _getRootInterface() -> "SubscriptionObjects": +def getRootInterface() -> "SubscriptionObjects": from modules.security.rootAccess import getRootUser return SubscriptionObjects(getRootUser(), mandateId=None) @@ -96,7 +96,7 @@ class SubscriptionObjects: # ========================================================================= def getPlan(self, planKey: str) -> Optional[SubscriptionPlan]: - return _getPlan(planKey) + return getPlanFromCatalog(planKey) def getSelectablePlans(self) -> List[SubscriptionPlan]: return _getSelectablePlans() diff --git a/modules/interfaces/interfaceRbac.py b/modules/interfaces/interfaceRbac.py index 14953ef1..13bdfcba 100644 --- a/modules/interfaces/interfaceRbac.py +++ b/modules/interfaces/interfaceRbac.py @@ -247,8 +247,8 @@ def getRecordsetWithRBAC( # Handle JSONB fields and ensure numeric types are correct # Import the helper function from connector module - from modules.connectors.connectorDbPostgre import _get_model_fields - fields = _get_model_fields(modelClass) + from modules.connectors.connectorDbPostgre import getModelFields + fields = getModelFields(modelClass) for record in records: for fieldName, fieldType in fields.items(): # Ensure numeric fields are properly typed @@ -379,8 +379,8 @@ def getRecordsetPaginatedWithRBAC( whereValues.append(value) if pagination and pagination.filters: - from modules.connectors.connectorDbPostgre import _get_model_fields - fields = _get_model_fields(modelClass) + from modules.connectors.connectorDbPostgre import getModelFields + fields = getModelFields(modelClass) validColumns = set(fields.keys()) for key, val in pagination.filters.items(): if key == "search" and isinstance(val, str) and val.strip(): @@ -440,8 +440,8 @@ def getRecordsetPaginatedWithRBAC( orderParts: List[str] = [] if pagination and pagination.sort: - from modules.connectors.connectorDbPostgre import _get_model_fields - validColumns = set(_get_model_fields(modelClass).keys()) + from modules.connectors.connectorDbPostgre import getModelFields + validColumns = set(getModelFields(modelClass).keys()) for sf in pagination.sort: if sf.field in validColumns: direction = "DESC" if sf.direction.lower() == "desc" else "ASC" @@ -464,10 +464,10 @@ def getRecordsetPaginatedWithRBAC( cursor.execute(dataSql, whereValues) records = [dict(row) for row in cursor.fetchall()] - from modules.connectors.connectorDbPostgre import _get_model_fields, _parseRecordFields - fields = _get_model_fields(modelClass) + from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields + fields = getModelFields(modelClass) for record in records: - _parseRecordFields(record, fields, f"table {table}") + parseRecordFields(record, fields, f"table {table}") for fieldName, fieldType in fields.items(): if fieldType == "JSONB" and fieldName in record and record[fieldName] is None: modelFields = modelClass.model_fields @@ -484,12 +484,15 @@ def getRecordsetPaginatedWithRBAC( if enrichPermissions: records = _enrichRecordsWithPermissions(records, permissions, currentUser) - if pagination: - pageSize = pagination.pageSize - totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 - return PaginatedResult(items=records, totalItems=totalItems, totalPages=totalPages) + from modules.routes.routeHelpers import enrichRowsWithFkLabels + enrichRowsWithFkLabels(records, modelClass) - return records + if pagination: + pageSize = pagination.pageSize + totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 + return PaginatedResult(items=records, totalItems=totalItems, totalPages=totalPages) + + return records except Exception as e: logger.error(f"Error in getRecordsetPaginatedWithRBAC for table {table}: {e}") return PaginatedResult(items=[], totalItems=0, totalPages=0) if pagination else [] @@ -518,8 +521,8 @@ def getDistinctColumnValuesWithRBAC( if not connector._ensureTableExists(modelClass): return [] - from modules.connectors.connectorDbPostgre import _get_model_fields - fields = _get_model_fields(modelClass) + from modules.connectors.connectorDbPostgre import getModelFields + fields = getModelFields(modelClass) if column not in fields: return [] @@ -614,21 +617,34 @@ def getDistinctColumnValuesWithRBAC( whereClause = " WHERE " + " AND ".join(whereConditions) if whereConditions else "" notNullCond = f'"{column}" IS NOT NULL AND "{column}"::TEXT != \'\'' if whereClause: - whereClause += f" AND {notNullCond}" + nonNullWhere = whereClause + f" AND {notNullCond}" else: - whereClause = f" WHERE {notNullCond}" + nonNullWhere = f" WHERE {notNullCond}" - sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{whereClause} ORDER BY val' + sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{nonNullWhere} ORDER BY val' with connector.connection.cursor() as cursor: cursor.execute(sql, whereValues) - return [row["val"] for row in cursor.fetchall()] + result = [row["val"] for row in cursor.fetchall()] + + # Include a None entry when NULL/empty rows exist (enables "(Leer)" filter) + emptyCond = f'("{column}" IS NULL OR "{column}"::TEXT = \'\')' + if whereClause: + emptySql = f'SELECT 1 FROM "{table}"{whereClause} AND {emptyCond} LIMIT 1' + else: + emptySql = f'SELECT 1 FROM "{table}" WHERE {emptyCond} LIMIT 1' + with connector.connection.cursor() as cursor: + cursor.execute(emptySql, whereValues) + if cursor.fetchone(): + result.append(None) + + return result except Exception as e: logger.error(f"Error in getDistinctColumnValuesWithRBAC for {table}.{column}: {e}") return [] -def _buildFilesScopeWhereClause( +def buildFilesScopeWhereClause( currentUser: User, table: str, connector, @@ -673,7 +689,7 @@ def _buildFilesScopeWhereClause( if instances: effectiveMandateId = instances[0].get("mandateId") or "" except Exception as e: - logger.warning(f"_buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}") + logger.warning(f"buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}") scopeParts: List[str] = [] scopeValues: List = [] @@ -757,7 +773,7 @@ def buildRbacWhereClause( namespaceAll = TABLE_NAMESPACE.get(table, "system") # Files: scope-based context filtering applies even with ALL access if namespaceAll == "files": - return _buildFilesScopeWhereClause( + return buildFilesScopeWhereClause( currentUser, table, connector, mandateId, featureInstanceId, baseConditions, baseValues, ) @@ -811,7 +827,7 @@ def buildRbacWhereClause( # - scope='featureInstance' → visible to users with access to that instance # - scope='personal' → only visible to owner (sysCreatedBy) if namespace == "files": - return _buildFilesScopeWhereClause( + return buildFilesScopeWhereClause( currentUser, table, connector, mandateId, featureInstanceId, baseConditions, baseValues, ) diff --git a/modules/routes/routeAdminDemoConfig.py b/modules/routes/routeAdminDemoConfig.py index d893c205..db37e775 100644 --- a/modules/routes/routeAdminDemoConfig.py +++ b/modules/routes/routeAdminDemoConfig.py @@ -28,9 +28,9 @@ def listDemoConfigs( currentUser: User = Depends(requirePlatformAdmin), ) -> dict: """List all available demo configurations.""" - from modules.demoConfigs import _getAvailableDemoConfigs + from modules.demoConfigs import getAvailableDemoConfigs - configs = _getAvailableDemoConfigs() + configs = getAvailableDemoConfigs() return { "configs": [cfg.toDict() for cfg in configs.values()], } @@ -44,9 +44,9 @@ def loadDemoConfig( currentUser: User = Depends(requirePlatformAdmin), ) -> dict: """Load (create) a demo configuration. Idempotent.""" - from modules.demoConfigs import _getDemoConfigByCode + from modules.demoConfigs import getDemoConfigByCode - config = _getDemoConfigByCode(code) + config = getDemoConfigByCode(code) if not config: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, @@ -69,9 +69,9 @@ def removeDemoConfig( currentUser: User = Depends(requirePlatformAdmin), ) -> dict: """Remove all data created by a demo configuration.""" - from modules.demoConfigs import _getDemoConfigByCode + from modules.demoConfigs import getDemoConfigByCode - config = _getDemoConfigByCode(code) + config = getDemoConfigByCode(code) if not config: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py index 66682464..9634dd0d 100644 --- a/modules/routes/routeAdminFeatures.py +++ b/modules/routes/routeAdminFeatures.py @@ -18,7 +18,7 @@ import json import math from pydantic import BaseModel, Field from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict -from modules.routes.routeHelpers import _applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory +from modules.routes.routeHelpers import applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory from modules.auth import limiter, getRequestContext, RequestContext, requirePlatformAdmin from modules.datamodels.datamodelUam import User, UserInDB @@ -481,7 +481,7 @@ def list_feature_instances( return handleIdsInMemory(items, pagination) if paginationParams: - filtered = _applyFiltersAndSort(items, paginationParams) + filtered = applyFiltersAndSort(items, paginationParams) totalItems = len(filtered) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize @@ -1019,7 +1019,7 @@ def list_template_roles( if mode == "ids": return handleIdsInMemory(enriched, pagination) - filtered = _applyFiltersAndSort(enriched, paginationParams) + filtered = applyFiltersAndSort(enriched, paginationParams) if paginationParams: totalItems = len(filtered) @@ -1223,7 +1223,7 @@ def list_feature_instance_users( raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") if paginationParams: - filtered = _applyFiltersAndSort(items, paginationParams) + filtered = applyFiltersAndSort(items, paginationParams) totalItems = len(filtered) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize diff --git a/modules/routes/routeAudit.py b/modules/routes/routeAudit.py index 3634ff9d..0e686297 100644 --- a/modules/routes/routeAudit.py +++ b/modules/routes/routeAudit.py @@ -69,14 +69,24 @@ def _applySortFilterSearch( return items -def _distinctColumnValues(items: List[Dict[str, Any]], column: str) -> List[str]: - """Extract sorted distinct non-empty string values for a column.""" +def _distinctColumnValues(items: List[Dict[str, Any]], column: str) -> List[Optional[str]]: + """Extract sorted distinct values for a column. + + Includes ``None`` as the last entry when at least one row has a null/empty + value — this enables the "(Leer)" filter option in the frontend. + """ vals = set() + hasEmpty = False for r in items: v = r.get(column) - if v is not None and v != "": - vals.add(str(v)) - return sorted(vals) + if v is None or v == "": + hasEmpty = True + continue + vals.add(str(v)) + result: List[Optional[str]] = sorted(vals) + if hasEmpty: + result.append(None) + return result def _enrichUserAndInstanceLabels( @@ -87,46 +97,32 @@ def _enrichUserAndInstanceLabels( instanceKey: str = "featureInstanceId", instanceLabelKey: str = "instanceLabel", ) -> None: - """Resolve userId → username and featureInstanceId → label in-place.""" - userIds = set() - instanceIds = set() - for r in items: - uid = r.get(userKey) - if uid and not r.get(usernameKey): - userIds.add(uid) - iid = r.get(instanceKey) - if iid: - instanceIds.add(iid) + """Resolve userId -> username and featureInstanceId -> label in-place. - userMap: Dict[str, str] = {} - instanceMap: Dict[str, str] = {} + Uses the central resolvers from routeHelpers. Returns None (not the raw ID) + for unresolvable entries so the frontend can distinguish "resolved" from + "missing". + """ + from modules.routes.routeHelpers import resolveUserLabels, resolveInstanceLabels - try: - from modules.interfaces.interfaceDbApp import getInterface - appIf = getInterface( - context.user, - mandateId=str(context.mandateId) if context.mandateId else None, - ) - if userIds: - users = appIf.getUsersByIds(list(userIds)) - for uid, u in users.items(): - name = getattr(u, "displayName", None) or getattr(u, "email", None) or uid - userMap[uid] = name - if instanceIds: - for iid in instanceIds: - fi = appIf.getFeatureInstance(iid) - if fi: - instanceMap[iid] = getattr(fi, "label", None) or getattr(fi, "featureCode", None) or iid - except Exception as e: - logger.debug("_enrichUserAndInstanceLabels: %s", e) + userIds = list({r.get(userKey) for r in items if r.get(userKey) and not r.get(usernameKey)}) + instanceIds = list({r.get(instanceKey) for r in items if r.get(instanceKey)}) + + userMap: Dict[str, Optional[str]] = {} + instanceMap: Dict[str, Optional[str]] = {} + + if userIds: + userMap = resolveUserLabels(userIds) + if instanceIds: + instanceMap = resolveInstanceLabels(instanceIds) for r in items: uid = r.get(userKey) if uid and not r.get(usernameKey) and uid in userMap: r[usernameKey] = userMap[uid] iid = r.get(instanceKey) - if iid and iid in instanceMap: - r[instanceLabelKey] = instanceMap[iid] + if iid: + r[instanceLabelKey] = instanceMap.get(iid) def _requireAuditAccess(context: RequestContext): diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 382b709a..e3d26352 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -20,7 +20,7 @@ from pydantic import BaseModel, Field from modules.auth import limiter, requirePlatformAdmin, getRequestContext, RequestContext # Import billing components -from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface, _getRootInterface +from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface, getRootInterface from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService import json import math @@ -140,44 +140,46 @@ def _getBillingDataScope(user) -> BillingDataScope: def _isAdminOfMandate(ctx: RequestContext, targetMandateId: str) -> bool: - """Check if user is PlatformAdmin or admin of the specified mandate.""" + """Check if user is PlatformAdmin or admin of the specified mandate. + + Fail-loud: any DB/lookup error is logged at ERROR and re-raised. We never + silently return False — that would mask infrastructure outages as "no + permission" and produce confusing 403s instead of actionable 500s. + """ if ctx.isPlatformAdmin: return True - try: - from modules.interfaces.interfaceDbApp import getRootInterface - rootInterface = getRootInterface() - userMandates = rootInterface.getUserMandates(str(ctx.user.id)) - for um in userMandates: - if str(getattr(um, 'mandateId', None)) != str(targetMandateId): - continue - if not getattr(um, 'enabled', True): - continue - umId = str(getattr(um, 'id', '')) - roleIds = rootInterface.getRoleIdsForUserMandate(umId) - for roleId in roleIds: - role = rootInterface.getRole(roleId) - if role and role.roleLabel == "admin" and not role.featureInstanceId: - return True - return False - except Exception: - return False + from modules.interfaces.interfaceDbApp import getRootInterface + rootInterface = getRootInterface() + userMandates = rootInterface.getUserMandates(str(ctx.user.id)) + for um in userMandates: + if str(getattr(um, 'mandateId', None)) != str(targetMandateId): + continue + if not getattr(um, 'enabled', True): + continue + umId = str(getattr(um, 'id', '')) + roleIds = rootInterface.getRoleIdsForUserMandate(umId) + for roleId in roleIds: + role = rootInterface.getRole(roleId) + if role and role.roleLabel == "admin" and not role.featureInstanceId: + return True + return False def _isMemberOfMandate(ctx: RequestContext, targetMandateId: str) -> bool: - """Check if user has any enabled membership in the specified mandate.""" - try: - from modules.interfaces.interfaceDbApp import getRootInterface - rootInterface = getRootInterface() - userMandates = rootInterface.getUserMandates(str(ctx.user.id)) - for um in userMandates: - if str(getattr(um, 'mandateId', None)) != str(targetMandateId): - continue - if not getattr(um, 'enabled', True): - continue - return True - return False - except Exception: - return False + """Check if user has any enabled membership in the specified mandate. + + Fail-loud: see _isAdminOfMandate above for the same rationale. + """ + from modules.interfaces.interfaceDbApp import getRootInterface + rootInterface = getRootInterface() + userMandates = rootInterface.getUserMandates(str(ctx.user.id)) + for um in userMandates: + if str(getattr(um, 'mandateId', None)) != str(targetMandateId): + continue + if not getattr(um, 'enabled', True): + continue + return True + return False @@ -887,7 +889,7 @@ def confirmCheckoutSession( if not _isAdminOfMandate(ctx, mandate_id): raise HTTPException(status_code=403, detail=routeApiMsg("Mandate admin role required")) - root_billing_interface = _getRootInterface() + root_billing_interface = getRootInterface() return _creditStripeSessionIfNeeded(root_billing_interface, session_dict, eventId=None) except HTTPException: raise @@ -957,10 +959,10 @@ async def stripeWebhook( sessionMode = session.get("mode") if hasattr(session, "get") else getattr(session, "mode", None) if sessionMode == "subscription": - _handleSubscriptionCheckoutCompleted(session, event_id) + handleSubscriptionCheckoutCompleted(session, event_id) return {"received": True} - billingInterface = _getRootInterface() + billingInterface = getRootInterface() if billingInterface.getStripeWebhookEventByEventId(event_id): logger.info(f"Stripe event {event_id} already processed, skipping") return {"received": True} @@ -997,11 +999,11 @@ async def stripeWebhook( return {"received": True} -def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: +def handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: """Handle checkout.session.completed for mode=subscription. Resolves the local PENDING record by ID from webhook metadata and transitions it.""" - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface - from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, _getPlan + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface + from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, getPlan from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import ( getService as getSubscriptionService, _notifySubscriptionChange, @@ -1033,8 +1035,16 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: mandateId = metadata.get("mandateId") planKey = metadata.get("planKey", "") platformUrl = platformUrl or metadata.get("platformUrl", "") - except Exception: - pass + except Exception as e: + # Stripe lookup is the only way to recover the metadata at this + # point — if it fails we MUST surface it, otherwise the webhook + # later short-circuits with "missing metadata" and the user + # silently gets stuck in PENDING. + logger.error( + "Stripe Subscription.retrieve(%s) failed during checkout " + "metadata recovery: %s", stripeSub, e, + ) + raise stripeSubId = session.get("subscription") @@ -1083,7 +1093,17 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: elif priceMapping and priceId == priceMapping.stripePriceIdInstances: stripeData["stripeItemIdInstances"] = item["id"] except Exception as e: - logger.error("Error retrieving Stripe subscription %s: %s", stripeSubId, e) + # Without these enrichment fields the activation completes anyway + # (status flips to ACTIVE/SCHEDULED below), but periods + Stripe + # item-IDs are missing on the local record, which breaks later + # add-on billing and renewal accounting. Re-raise so the webhook + # is retried by Stripe instead of silently shipping a broken row. + logger.error( + "Error retrieving Stripe subscription %s during checkout " + "completion (will be retried by Stripe): %s", + stripeSubId, e, + ) + raise if stripeData: subInterface.updateFields(subscriptionRecordId, stripeData) @@ -1136,12 +1156,12 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: subService.invalidateCache(mandateId) if toStatus == SubscriptionStatusEnum.ACTIVE: - plan = _getPlan(planKey) + plan = getPlan(planKey) updatedSub = subInterface.getById(subscriptionRecordId) _notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=updatedSub, platformUrl=platformUrl) try: - billingIf = _getRootInterface() + billingIf = getRootInterface() billingIf.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung") except Exception as ex: logger.error("creditSubscriptionBudget on activation failed: %s", ex) @@ -1155,8 +1175,8 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: def _handleSubscriptionWebhook(event) -> None: """Process Stripe subscription webhook events. All record resolution is by stripeSubscriptionId — no mandate-based guessing.""" - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface - from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, _getPlan + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface + from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, getPlan from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import ( getService as getSubscriptionService, _notifySubscriptionChange, @@ -1205,11 +1225,11 @@ def _handleSubscriptionWebhook(event) -> None: subInterface.transitionStatus(subId, SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE) subService.invalidateCache(mandateId) planKey = sub.get("planKey", "") - plan = _getPlan(planKey) + plan = getPlan(planKey) refreshedSub = subInterface.getById(subId) _notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=refreshedSub, platformUrl=webhookPlatformUrl) try: - _getRootInterface().creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung") + getRootInterface().creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung") except Exception as ex: logger.error("creditSubscriptionBudget SCHEDULED->ACTIVE failed: %s", ex) logger.info("SCHEDULED -> ACTIVE for sub %s (mandate %s)", subId, mandateId) @@ -1245,7 +1265,7 @@ def _handleSubscriptionWebhook(event) -> None: scheduled["id"], SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE, ) subService.invalidateCache(mandateId) - plan = _getPlan(scheduled.get("planKey", "")) + plan = getPlan(scheduled.get("planKey", "")) refreshedScheduled = subInterface.getById(scheduled["id"]) _notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=refreshedScheduled, platformUrl=webhookPlatformUrl) logger.info("Promoted SCHEDULED sub %s -> ACTIVE (mandate %s)", scheduled["id"], mandateId) @@ -1256,7 +1276,7 @@ def _handleSubscriptionWebhook(event) -> None: if currentStatus == SubscriptionStatusEnum.ACTIVE: subInterface.transitionStatus(subId, SubscriptionStatusEnum.ACTIVE, SubscriptionStatusEnum.PAST_DUE) subService.invalidateCache(mandateId) - plan = _getPlan(sub.get("planKey", "")) + plan = getPlan(sub.get("planKey", "")) _notifySubscriptionChange(mandateId, "payment_failed", plan, subscriptionRecord=sub, platformUrl=webhookPlatformUrl) logger.info("Payment failed for sub %s (mandate %s)", subId, mandateId) @@ -1283,7 +1303,7 @@ def _handleSubscriptionWebhook(event) -> None: period_start_at = datetime.fromtimestamp(int(period_ts), tz=timezone.utc) periodLabel = period_start_at.strftime("%Y-%m-%d") try: - billing_if = _getRootInterface() + billing_if = getRootInterface() billing_if.resetStorageBillingPeriod(mandateId, period_start_at) billing_if.reconcileMandateStorageBilling(mandateId) except Exception as ex: @@ -1291,7 +1311,7 @@ def _handleSubscriptionWebhook(event) -> None: planKey = sub.get("planKey", "") try: - billing_if = _getRootInterface() + billing_if = getRootInterface() billing_if.creditSubscriptionBudget(mandateId, planKey, periodLabel=periodLabel or "Periodenverlängerung") except Exception as ex: logger.error("creditSubscriptionBudget on invoice.paid failed: %s", ex) @@ -1408,28 +1428,21 @@ def getUsersForMandate( def _attachCreatedByUserNamesToTransactionRows(rows: List[Dict[str, Any]]) -> None: - """Resolve createdByUserId to userName using root app interface (sysadmin transaction views).""" - try: - from modules.interfaces.interfaceDbApp import getRootInterface + """Resolve createdByUserId to userName using central FK resolvers. - appRoot = getRootInterface() - userNames: Dict[str, str] = {} - for row in rows: - uid = row.get("createdByUserId") - if not uid: - row["userName"] = "" - continue - if uid not in userNames: - try: - u = appRoot.getUser(uid) - userNames[uid] = u.username if u else uid[:8] - except Exception: - userNames[uid] = uid[:8] - row["userName"] = userNames.get(uid, "") - except Exception: - for row in rows: - uid = row.get("createdByUserId") - row["userName"] = uid[:8] if uid else "" + Returns None (not a truncated UUID) for unresolvable IDs so the frontend + renders an explicit NA() indicator instead of a misleading 8-char snippet. + """ + from modules.routes.routeHelpers import resolveUserLabels + + userIds = list({r.get("createdByUserId") for r in rows if r.get("createdByUserId")}) + userMap: Dict[str, Optional[str]] = {} + if userIds: + userMap = resolveUserLabels(userIds) + + for row in rows: + uid = row.get("createdByUserId") + row["userName"] = userMap.get(uid) if uid else None def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: @@ -1717,18 +1730,13 @@ def getUserViewStatistics( for acc in allAccounts: accountToMandate[acc.get("id", "")] = acc.get("mandateId", "") - from modules.interfaces.interfaceDbApp import getInterface as getAppInterface - mandateIdsForLookup = list(set(accountToMandate.values())) - mandateMap: Dict[str, str] = {} - if mandateIdsForLookup: - rootIface = getAppInterface(ctx.user) - mandatesById = rootIface.getMandatesByIds(mandateIdsForLookup) - for mid, m in mandatesById.items(): - mandateMap[mid] = getattr(m, "name", mid) or mid + from modules.routes.routeHelpers import resolveMandateLabels + mandateIdsForLookup = list({v for v in accountToMandate.values() if v}) + mandateMap: Dict[str, Optional[str]] = resolveMandateLabels(mandateIdsForLookup) if mandateIdsForLookup else {} def _mandateName(accountId: str) -> str: mid = accountToMandate.get(accountId, "") - return mandateMap.get(mid, mid or "unknown") + return mandateMap.get(mid) or f"NA({mid})" if mid else "unknown" costByMandate: Dict[str, float] = {} for accId, total in agg.get("costByAccountId", {}).items(): diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py index 290be722..05c8aa9d 100644 --- a/modules/routes/routeDataConnections.py +++ b/modules/routes/routeDataConnections.py @@ -127,7 +127,7 @@ def get_auth_authority_options( # CRUD ENDPOINTS # ============================================================================ -@router.get("/", response_model=PaginatedResponse[UserConnection]) +@router.get("/") @limiter.limit("30/minute") async def get_connections( request: Request, @@ -135,7 +135,7 @@ async def get_connections( mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), currentUser: User = Depends(getCurrentUser) -) -> PaginatedResponse[UserConnection]: +): """Get connections for the current user with optional pagination, sorting, and filtering. SECURITY: This endpoint is secure - users can only see their own connections. @@ -151,7 +151,7 @@ async def get_connections( - GET /api/connections/?mode=filterValues&column=status - GET /api/connections/?mode=ids """ - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels def _buildEnhancedItems(): interface = getInterface(currentUser) @@ -252,27 +252,13 @@ async def get_connections( } enhanced_connections_dict.append(connection_dict) - # If no pagination requested, return all items + enrichRowsWithFkLabels(enhanced_connections_dict, UserConnection) + if paginationParams is None: - # Convert back to UserConnection objects (enum strings are already in dict) - items = [] - for conn_dict in enhanced_connections_dict: - conn_dict_copy = dict(conn_dict) - if "authority" in conn_dict_copy and isinstance(conn_dict_copy["authority"], str): - try: - conn_dict_copy["authority"] = AuthAuthority(conn_dict_copy["authority"]) - except ValueError: - pass - if "status" in conn_dict_copy and isinstance(conn_dict_copy["status"], str): - try: - conn_dict_copy["status"] = ConnectionStatus(conn_dict_copy["status"]) - except ValueError: - pass - items.append(UserConnection(**conn_dict_copy)) - return PaginatedResponse( - items=items, - pagination=None - ) + return { + "items": enhanced_connections_dict, + "pagination": None, + } # Apply filtering if provided if paginationParams.filters: @@ -292,43 +278,24 @@ async def get_connections( paginationParams.sort ) - # Count total items after filters totalItems = len(enhanced_connections_dict) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 - # Apply pagination (skip/limit) startIdx = (paginationParams.page - 1) * paginationParams.pageSize endIdx = startIdx + paginationParams.pageSize paged_connections = enhanced_connections_dict[startIdx:endIdx] - # Convert back to UserConnection objects (convert enum strings back to enums) - items = [] - for conn_dict in paged_connections: - # Convert enum strings back to enum objects - conn_dict_copy = dict(conn_dict) - if "authority" in conn_dict_copy and isinstance(conn_dict_copy["authority"], str): - try: - conn_dict_copy["authority"] = AuthAuthority(conn_dict_copy["authority"]) - except ValueError: - pass # Keep as string if invalid - if "status" in conn_dict_copy and isinstance(conn_dict_copy["status"], str): - try: - conn_dict_copy["status"] = ConnectionStatus(conn_dict_copy["status"]) - except ValueError: - pass # Keep as string if invalid - items.append(UserConnection(**conn_dict_copy)) - - return PaginatedResponse( - items=items, - pagination=PaginationMetadata( + return { + "items": paged_connections, + "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=totalItems, totalPages=totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } except HTTPException: raise diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index 82cf1624..11b90f09 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -17,6 +17,7 @@ from modules.shared.attributeUtils import getModelAttributeDefinitions from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict from modules.shared.i18nRegistry import apiRouteContext +from modules.routes.routeHelpers import enrichRowsWithFkLabels routeApiMsg = apiRouteContext("routeDataFiles") # Configure logger @@ -220,7 +221,7 @@ router = APIRouter( } ) -@router.get("/list", response_model=PaginatedResponse[FileItem]) +@router.get("/list") @limiter.limit("120/minute") def get_files( request: Request, @@ -229,7 +230,7 @@ def get_files( column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), currentUser: User = Depends(getCurrentUser), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[FileItem]: +): """ Get files with optional pagination, sorting, and filtering. @@ -303,24 +304,27 @@ def get_files( recordFilter = {"folderId": fVal} result = managementInterface.getAllFiles(pagination=paginationParams, recordFilter=recordFilter) - + + def _filesToDicts(items): + return [f.model_dump() if hasattr(f, "model_dump") else (dict(f) if not isinstance(f, dict) else f) for f in items] + if paginationParams: - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_filesToDicts(result.items), FileItem) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } else: - return PaginatedResponse( - items=result, - pagination=None - ) + items = result if isinstance(result, list) else (result.items if hasattr(result, "items") else [result]) + enriched = enrichRowsWithFkLabels(_filesToDicts(items), FileItem) + return {"items": enriched, "pagination": None} except HTTPException: raise except Exception as e: @@ -1019,14 +1023,14 @@ def updateFileNeutralize( # ── File endpoints with path parameters (catch-all /{fileId}) ───────────────── -@router.get("/{fileId}", response_model=FileItem) +@router.get("/{fileId}") @limiter.limit("30/minute") def get_file( request: Request, fileId: str = Path(..., description="ID of the file"), currentUser: User = Depends(getCurrentUser), context: RequestContext = Depends(getRequestContext) -) -> FileItem: +): """Get a file. Resolves the file's mandate/instance scope automatically.""" try: _mgmt, fileData = _resolveFileWithScope(currentUser, context, fileId) @@ -1036,7 +1040,9 @@ def get_file( detail=f"File with ID {fileId} not found" ) - return fileData + fileDict = fileData.model_dump() if hasattr(fileData, "model_dump") else dict(fileData) + enriched = enrichRowsWithFkLabels([fileDict], FileItem) + return enriched[0] except interfaceDbManagement.FileNotFoundError as e: logger.warning(f"File not found: {str(e)}") diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py index 2bed0169..7972181d 100644 --- a/modules/routes/routeDataMandates.py +++ b/modules/routes/routeDataMandates.py @@ -22,7 +22,7 @@ from modules.auth import limiter, requirePlatformAdmin, getRequestContext, getCu # Import interfaces import modules.interfaces.interfaceDbApp as interfaceDbApp -from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRootInterface +from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRootInterface from modules.shared.attributeUtils import getModelAttributeDefinitions from modules.shared.auditLogger import audit_logger @@ -318,7 +318,7 @@ def create_mandate( from modules.datamodels.datamodelSubscription import ( MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS, ) - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot from datetime import datetime, timezone, timedelta planKey = mandateData.get("planKey", "TRIAL_14D") @@ -660,7 +660,7 @@ def list_mandate_users( from modules.routes.routeHelpers import ( handleFilterValuesInMemory, handleIdsInMemory, - _applyFiltersAndSort as _sharedApplyFiltersAndSort, + applyFiltersAndSort as _sharedApplyFiltersAndSort, paginateInMemory, ) @@ -674,13 +674,23 @@ def list_mandate_users( if paginationParams: paginationParamsObj = None - try: - paginationDict = json.loads(pagination) if pagination else None + if pagination: + try: + paginationDict = json.loads(pagination) + except json.JSONDecodeError as e: + raise HTTPException( + status_code=400, + detail=f"Invalid 'pagination' query: not valid JSON ({e.msg})", + ) if paginationDict: - paginationDict = normalize_pagination_dict(paginationDict) - paginationParamsObj = PaginationParams(**paginationDict) - except Exception: - pass + try: + paginationDict = normalize_pagination_dict(paginationDict) + paginationParamsObj = PaginationParams(**paginationDict) + except Exception as e: + raise HTTPException( + status_code=400, + detail=f"Invalid 'pagination' payload: {e}", + ) filtered = _sharedApplyFiltersAndSort(result, paginationParamsObj) totalItems = len(filtered) diff --git a/modules/routes/routeDataPrompts.py b/modules/routes/routeDataPrompts.py index 79dc8d72..ee99b912 100644 --- a/modules/routes/routeDataPrompts.py +++ b/modules/routes/routeDataPrompts.py @@ -44,20 +44,25 @@ def get_prompts( - filterValues: distinct values for a column (cross-filtered) - ids: all IDs matching current filters """ - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels + + def _promptsToEnrichedDicts(promptItems): + dicts = [r.model_dump() if hasattr(r, 'model_dump') else (dict(r) if not isinstance(r, dict) else r) for r in promptItems] + enrichRowsWithFkLabels(dicts, Prompt) + return dicts if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") managementInterface = interfaceDbManagement.getInterface(currentUser) result = managementInterface.getAllPrompts(pagination=None) - items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in result] + items = _promptsToEnrichedDicts(result) return handleFilterValuesInMemory(items, column, pagination) if mode == "ids": managementInterface = interfaceDbManagement.getInterface(currentUser) result = managementInterface.getAllPrompts(pagination=None) - items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in result] + items = _promptsToEnrichedDicts(result) return handleIdsInMemory(items, pagination) paginationParams = None @@ -74,22 +79,24 @@ def get_prompts( result = managementInterface.getAllPrompts(pagination=paginationParams) if paginationParams: - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + items = _promptsToEnrichedDicts(result.items) + return { + "items": items, + "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } else: - return PaginatedResponse( - items=result, - pagination=None - ) + items = _promptsToEnrichedDicts(result) + return { + "items": items, + "pagination": None, + } @router.post("", response_model=Prompt) diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py index ea796aab..67156291 100644 --- a/modules/routes/routeDataUsers.py +++ b/modules/routes/routeDataUsers.py @@ -25,12 +25,17 @@ from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority from modules.interfaces.interfaceDbApp import getRootInterface from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict from modules.shared.i18nRegistry import apiRouteContext +from modules.routes.routeHelpers import enrichRowsWithFkLabels routeApiMsg = apiRouteContext("routeDataUsers") # Configure logger logger = logging.getLogger(__name__) +def _usersToDicts(items) -> list: + return [u.model_dump() if hasattr(u, "model_dump") else (dict(u) if not isinstance(u, dict) else u) for u in items] + + def _isAdminForUser(context: RequestContext, targetUserId: str) -> bool: """ Check if the current user has admin rights for the target user. @@ -187,7 +192,7 @@ def get_user_options( # CRUD ENDPOINTS # ============================================================================ -@router.get("/", response_model=PaginatedResponse[User]) +@router.get("/") @limiter.limit("30/minute") def get_users( request: Request, @@ -195,7 +200,7 @@ def get_users( mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), context: RequestContext = Depends(getRequestContext) -) -> PaginatedResponse[User]: +): """ Get users with optional pagination, sorting, and filtering. MULTI-TENANT: mandateId from X-Mandate-Id header determines scope. @@ -236,48 +241,44 @@ def get_users( # Get users for specific mandate using getUsersByMandate result = appInterface.getUsersByMandate(str(context.mandateId), paginationParams) - # getUsersByMandate returns PaginatedResult if pagination was provided if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=result.currentPage, pageSize=result.pageSize, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } else: - # No pagination - result is a list users = result if isinstance(result, list) else result.items if hasattr(result, 'items') else [] - return PaginatedResponse( - items=users, - pagination=None - ) + enriched = enrichRowsWithFkLabels(_usersToDicts(users), User) + return {"items": enriched, "pagination": None} elif context.isPlatformAdmin: # PlatformAdmin without mandateId — DB-level pagination via interface result = appInterface.getAllUsers(paginationParams) if paginationParams and hasattr(result, 'items'): - return PaginatedResponse( - items=result.items, - pagination=PaginationMetadata( + enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User) + return { + "items": enriched, + "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=result.totalItems, totalPages=result.totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } else: users = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else []) - return PaginatedResponse( - items=users, - pagination=None - ) + enriched = enrichRowsWithFkLabels(_usersToDicts(users), User) + return {"items": enriched, "pagination": None} else: # Non-SysAdmin without mandateId: aggregate users across all admin mandates rootInterface = getRootInterface() @@ -316,34 +317,30 @@ def get_users( for u in batchUsers.values() ] - from modules.routes.routeHelpers import _applyFiltersAndSort as _applyFiltersAndSortHelper + from modules.routes.routeHelpers import applyFiltersAndSort as _applyFiltersAndSortHelper filteredUsers = _applyFiltersAndSortHelper(allUsers, paginationParams) - users = [User(**u) for u in filteredUsers] + enriched = enrichRowsWithFkLabels(filteredUsers, User) if paginationParams: import math - totalItems = len(users) + totalItems = len(enriched) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize endIdx = startIdx + paginationParams.pageSize - paginatedUsers = users[startIdx:endIdx] - return PaginatedResponse( - items=paginatedUsers, - pagination=PaginationMetadata( + return { + "items": enriched[startIdx:endIdx], + "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=totalItems, totalPages=totalPages, sort=paginationParams.sort, filters=paginationParams.filters - ) - ) + ).model_dump(), + } else: - return PaginatedResponse( - items=users, - pagination=None - ) + return {"items": enriched, "pagination": None} except HTTPException: raise except Exception as e: @@ -753,10 +750,10 @@ def send_password_link( expiryHours = int(APP_CONFIG.get("Auth_RESET_TOKEN_EXPIRY_HOURS", "24")) try: - from modules.routes.routeSecurityLocal import _buildAuthEmailHtml, _sendAuthEmail + from modules.routes.routeSecurityLocal import buildAuthEmailHtml, sendAuthEmail emailSubject = "PowerOn - Passwort setzen" - emailHtml = _buildAuthEmailHtml( + emailHtml = buildAuthEmailHtml( greeting=f"Hallo {targetUser.fullName or targetUser.username}", bodyLines=[ "Ein Administrator hat einen Link zum Setzen Ihres Passworts angefordert.", @@ -770,7 +767,7 @@ def send_password_link( footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie diese Anforderung nicht erwartet haben, kontaktieren Sie bitte Ihren Administrator.", ) - emailSent = _sendAuthEmail( + emailSent = sendAuthEmail( recipient=targetUser.email, subject=emailSubject, message="", diff --git a/modules/routes/routeHelpers.py b/modules/routes/routeHelpers.py index de2f863b..19bfdb8e 100644 --- a/modules/routes/routeHelpers.py +++ b/modules/routes/routeHelpers.py @@ -12,7 +12,7 @@ Provides unified logic for: import copy import json import logging -from typing import Any, Dict, List, Optional, Callable +from typing import Any, Dict, List, Optional, Callable, Union from fastapi.responses import JSONResponse @@ -29,64 +29,183 @@ logger = logging.getLogger(__name__) # Central FK label resolvers (cross-DB) # --------------------------------------------------------------------------- -def _resolveMandateLabels(ids: List[str]) -> Dict[str, str]: +def resolveMandateLabels(ids: List[str]) -> Dict[str, Optional[str]]: + """Resolve mandate IDs to labels. Returns None (not the ID!) for + unresolvable entries so the caller can distinguish "resolved" from "missing". + """ from modules.interfaces.interfaceDbApp import getRootInterface rootIface = getRootInterface() mMap = rootIface.getMandatesByIds(ids) - return { - mid: getattr(m, "label", None) or getattr(m, "name", mid) or mid - for mid, m in mMap.items() - } + result: Dict[str, Optional[str]] = {} + for mid in ids: + m = mMap.get(mid) + label = (getattr(m, "label", None) or getattr(m, "name", None)) if m else None + if not label: + logger.warning("resolveMandateLabels: no label for id=%s (found=%s)", mid, m is not None) + result[mid] = label or None + return result -def _resolveInstanceLabels(ids: List[str]) -> Dict[str, str]: +def resolveInstanceLabels(ids: List[str]) -> Dict[str, Optional[str]]: + """Resolve feature-instance IDs to labels. Returns None for unresolvable.""" from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceFeatures import getFeatureInterface rootIface = getRootInterface() featureIface = getFeatureInterface(rootIface.db) - result: Dict[str, str] = {} + result: Dict[str, Optional[str]] = {} for iid in ids: fi = featureIface.getFeatureInstance(iid) - result[iid] = fi.label if fi and fi.label else iid + label = fi.label if fi and fi.label else None + if not label: + logger.warning("resolveInstanceLabels: no label for id=%s (found=%s)", iid, fi is not None) + result[iid] = label return result -def _resolveUserLabels(ids: List[str]) -> Dict[str, str]: +def resolveUserLabels(ids: List[str]) -> Dict[str, Optional[str]]: + """Resolve user IDs to display names. Returns None for unresolvable.""" from modules.interfaces.interfaceDbApp import getRootInterface rootIface = getRootInterface() + from modules.datamodels.datamodelUam import User as _User + uniqueIds = list(set(ids)) users = rootIface.db.getRecordset( - __import__("modules.datamodels.datamodelUam", fromlist=["User"]).User, - recordFilter={"id": list(set(ids))}, + _User, + recordFilter={"id": uniqueIds}, ) - result: Dict[str, str] = {} + if not users and uniqueIds: + logger.warning( + "resolveUserLabels: query returned 0 users for %d ids (db=%s, table=%s). " + "Attempting full table scan...", + len(uniqueIds), getattr(rootIface.db, 'dbDatabase', '?'), _User.__name__, + ) + allUsers = rootIface.db.getRecordset(_User) + logger.warning( + "resolveUserLabels: full scan found %d users total. Looking for ids: %s", + len(allUsers or []), uniqueIds[:3], + ) + users = [u for u in (allUsers or []) if u.get("id") in set(uniqueIds)] + result: Dict[str, Optional[str]] = {} + found: Dict[str, dict] = {} for u in (users or []): uid = u.get("id", "") - result[uid] = u.get("username") or u.get("email") or uid + found[uid] = u + for uid in ids: + u = found.get(uid) + if u: + result[uid] = u.get("username") or u.get("email") or None + else: + logger.warning("resolveUserLabels: user not found for id=%s", uid) + result[uid] = None return result +def resolveRoleLabels(ids: List[str]) -> Dict[str, Optional[str]]: + """Resolve Role.id to roleLabel. Returns None for unresolvable.""" + if not ids: + return {} + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.datamodels.datamodelRbac import Role as _Role + rootIface = getRootInterface() + recs = rootIface.db.getRecordset( + _Role, + recordFilter={"id": list(set(ids))}, + ) or [] + out: Dict[str, Optional[str]] = {i: None for i in ids} + for r in recs: + rid = r.get("id") + if rid: + out[rid] = r.get("roleLabel") or None + for rid in ids: + if out.get(rid) is None: + logger.warning("resolveRoleLabels: no label for id=%s", rid) + return out + + _BUILTIN_FK_RESOLVERS: Dict[str, Callable[[List[str]], Dict[str, str]]] = { - "Mandate": _resolveMandateLabels, - "FeatureInstance": _resolveInstanceLabels, - "User": _resolveUserLabels, + "Mandate": resolveMandateLabels, + "FeatureInstance": resolveInstanceLabels, + "User": resolveUserLabels, + "Role": resolveRoleLabels, } def _buildLabelResolversFromModel(modelClass: type) -> Dict[str, Callable[[List[str]], Dict[str, str]]]: """ - Auto-build labelResolvers dict from fk_model annotations on a Pydantic model. - Maps field names to resolver functions for all fields that have a known fk_model. + Auto-build labelResolvers dict from fk_model / fk_target annotations on a Pydantic model. + Maps field names to resolver functions for all fields that have a known FK target. + Unlike ``_get_fk_sort_meta`` this does NOT require ``fk_label_field`` — the + builtin resolvers already know which column to read. """ - from modules.connectors.connectorDbPostgre import _get_fk_sort_meta - fkMeta = _get_fk_sort_meta(modelClass) resolvers: Dict[str, Callable[[List[str]], Dict[str, str]]] = {} - for fieldName, meta in fkMeta.items(): - fkModelName = meta.get("model", "") - if fkModelName in _BUILTIN_FK_RESOLVERS: - resolvers[fieldName] = _BUILTIN_FK_RESOLVERS[fkModelName] + for name, fieldInfo in modelClass.model_fields.items(): + extra = fieldInfo.json_schema_extra + if not extra or not isinstance(extra, dict): + continue + fkModel = extra.get("fk_model") + tgt = extra.get("fk_target") + if not fkModel and isinstance(tgt, dict): + fkModel = tgt.get("table") + if fkModel and fkModel in _BUILTIN_FK_RESOLVERS: + resolvers[name] = _BUILTIN_FK_RESOLVERS[fkModel] return resolvers +def enrichRowsWithFkLabels( + rows: List[Dict[str, Any]], + modelClass: type = None, + *, + labelResolvers: Optional[Dict[str, Callable[[List[str]], Dict[str, Optional[str]]]]] = None, + extraResolvers: Optional[Dict[str, Callable[[List[str]], Dict[str, Optional[str]]]]] = None, +) -> List[Dict[str, Any]]: + """Add ``{field}Label`` columns to each row for every FK field that has a + registered resolver. + + ``modelClass`` — if provided, resolvers are auto-built from ``fk_model`` + annotations on the Pydantic model (via ``_buildLabelResolversFromModel``). + + ``labelResolvers`` — explicit resolver map that overrides auto-built ones. + + ``extraResolvers`` — merged on top of auto-built / explicit resolvers. Use + for ad-hoc fields that are not FK-annotated on the model (e.g. + ``createdByUserId`` on billing transactions). + + If a label cannot be resolved the ``{field}Label`` value is ``None`` + (never the raw ID — that would reintroduce the silent-truncation bug). + """ + resolvers: Dict[str, Callable] = {} + + if modelClass is not None and labelResolvers is None: + resolvers = _buildLabelResolversFromModel(modelClass) + elif labelResolvers is not None: + resolvers = dict(labelResolvers) + + if extraResolvers: + resolvers.update(extraResolvers) + + if not resolvers or not rows: + return rows + + for field, resolver in resolvers.items(): + ids = list({str(r.get(field)) for r in rows if r.get(field)}) + if not ids: + continue + try: + labelMap = resolver(ids) + except Exception as e: + logger.error("enrichRowsWithFkLabels: resolver for '%s' raised: %s", field, e) + labelMap = {} + + labelKey = f"{field}Label" + for r in rows: + fkVal = r.get(field) + if fkVal: + r[labelKey] = labelMap.get(str(fkVal)) + else: + r[labelKey] = None + + return rows + + # --------------------------------------------------------------------------- # Cross-filter pagination parsing # --------------------------------------------------------------------------- @@ -210,7 +329,7 @@ def handleIdsMode( # In-memory helpers (for enriched / non-SQL routes) # --------------------------------------------------------------------------- -def _applyFiltersAndSort( +def applyFiltersAndSort( items: List[Dict[str, Any]], paginationParams: Optional[PaginationParams], ) -> List[Dict[str, Any]]: @@ -364,12 +483,21 @@ def _extractDistinctValues( items: List[Dict[str, Any]], columnKey: str, requestLang: Optional[str] = None, -) -> List[str]: - """Extract sorted distinct display values for a column from enriched items.""" +) -> List[Optional[str]]: + """Extract sorted distinct display values for a column from enriched items. + + Includes ``None`` as the last entry when at least one row has a null/empty + value — this enables the "(Leer)" filter option in the frontend. + """ + _MISSING = object() values = set() + hasEmpty = False for item in items: - val = item.get(columnKey) + val = item.get(columnKey, _MISSING) + if val is _MISSING: + continue if val is None or val == "": + hasEmpty = True continue if isinstance(val, bool): values.add("true" if val else "false") @@ -381,7 +509,10 @@ def _extractDistinctValues( values.add(text) else: values.add(str(val)) - return sorted(values, key=lambda v: v.lower()) + result: List[Optional[str]] = sorted(values, key=lambda v: v.lower()) + if hasEmpty: + result.append(None) + return result def handleFilterValuesInMemory( @@ -396,7 +527,7 @@ def handleFilterValuesInMemory( Returns JSONResponse to bypass FastAPI response_model validation. """ crossFilterParams = parseCrossFilterPagination(column, paginationJson) - crossFiltered = _applyFiltersAndSort(items, crossFilterParams) + crossFiltered = applyFiltersAndSort(items, crossFilterParams) return JSONResponse(content=_extractDistinctValues(crossFiltered, column, requestLang)) @@ -411,7 +542,7 @@ def handleIdsInMemory( Returns JSONResponse to bypass FastAPI response_model validation. """ pagination = parsePaginationForIds(paginationJson) - filtered = _applyFiltersAndSort(items, pagination) + filtered = applyFiltersAndSort(items, pagination) ids = [] for item in filtered: val = item.get(idField) @@ -510,6 +641,7 @@ def getRecordsetPaginatedWithFkSort( idOrder = {pid: idx for idx, pid in enumerate(pageIds)} pageItems.sort(key=lambda r: idOrder.get(r.get(idField), 999999)) + enrichRowsWithFkLabels(pageItems, modelClass) totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 return {"items": pageItems, "totalItems": totalItems, "totalPages": totalPages} diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py index cadf128e..927d1bf2 100644 --- a/modules/routes/routeI18n.py +++ b/modules/routes/routeI18n.py @@ -26,7 +26,7 @@ from fastapi.responses import Response from pydantic import BaseModel, Field from modules.auth import getCurrentUser, requireSysAdmin, requirePlatformAdmin -from modules.connectors.connectorDbPostgre import _get_cached_connector +from modules.connectors.connectorDbPostgre import getCachedConnector from modules.datamodels.datamodelAi import ( AiCallOptions, AiCallRequest, @@ -40,11 +40,11 @@ from modules.datamodels.datamodelRbac import Role from modules.datamodels.datamodelFeatures import Feature from modules.datamodels.datamodelNotification import NotificationType from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface -from modules.routes.routeNotifications import _createNotification +from modules.routes.routeNotifications import createNotification from modules.shared.configuration import APP_CONFIG from modules.shared.i18nRegistry import ( _enforceSourcePlaceholders, - _loadCache as _reloadI18nCache, + loadCache as _reloadI18nCache, apiRouteContext, ) from modules.shared.timeUtils import getUtcTimestamp @@ -109,7 +109,7 @@ _ISO_PRIORITY_CODES: List[str] = ["de", "gsw", "en", "fr", "it"] # --------------------------------------------------------------------------- def _publicMgmtDb(): - return _get_cached_connector( + return getCachedConnector( dbHost=APP_CONFIG.get("DB_HOST", "localhost"), dbDatabase="poweron_management", dbUser=APP_CONFIG.get("DB_USER"), @@ -729,7 +729,7 @@ async def _run_create_language_job_async(userId: str, code: str, label: str, cur tmCount = await _translateTextMultilingualFields(db, code, label, billingCb) - _createNotification( + createNotification( userId, NotificationType.SYSTEM, title="Sprachset erstellt", @@ -739,7 +739,7 @@ async def _run_create_language_job_async(userId: str, code: str, label: str, cur logger.info("i18n create job done: code=%s, translated=%d/%d, tm_fields=%d", code, len(translated), len(xxEntries), tmCount) except Exception as e: logger.exception("create language job failed: %s", e) - _createNotification( + createNotification( userId, NotificationType.SYSTEM, title="Sprachset fehlgeschlagen", @@ -790,7 +790,7 @@ async def create_language_set( db.recordCreate(UiLanguageSet, rec) background.add_task(_run_create_language_job, uid, code, resolvedLabel, currentUser, mandateId) - _createNotification( + createNotification( uid, NotificationType.SYSTEM, title="Sprachset wird erzeugt", diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index 7e852b54..8138775f 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -21,7 +21,7 @@ from pydantic import BaseModel, Field, model_validator from modules.auth import limiter, getRequestContext, RequestContext, getCurrentUser from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict -from modules.routes.routeHelpers import _applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory +from modules.routes.routeHelpers import applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels from modules.datamodels.datamodelInvitation import Invitation from modules.interfaces.interfaceDbApp import getRootInterface from modules.shared.timeUtils import getUtcTimestamp @@ -302,8 +302,8 @@ def create_invitation( emailSubject = f"Einladung zu {mandateName}" invite_desc = f"dem Mandanten «{mandateName}» beizutreten" - from modules.routes.routeSecurityLocal import _buildAuthEmailHtml - emailBody = _buildAuthEmailHtml( + from modules.routes.routeSecurityLocal import buildAuthEmailHtml + emailBody = buildAuthEmailHtml( greeting=f"Hallo {display_name}", bodyLines=[ f"Sie wurden eingeladen, {invite_desc}.", @@ -496,20 +496,22 @@ def list_invitations( raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") if paginationParams: - filtered = _applyFiltersAndSort(result, paginationParams) + filtered = applyFiltersAndSort(result, paginationParams) totalItems = len(filtered) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize endIdx = startIdx + paginationParams.pageSize + enriched = enrichRowsWithFkLabels(filtered[startIdx:endIdx], Invitation) return { - "items": filtered[startIdx:endIdx], + "items": enriched, "pagination": PaginationMetadata( currentPage=paginationParams.page, pageSize=paginationParams.pageSize, totalItems=totalItems, totalPages=totalPages, sort=paginationParams.sort, filters=paginationParams.filters, ).model_dump(), } - return result + enriched = enrichRowsWithFkLabels(result, Invitation) + return {"items": enriched, "pagination": None} except HTTPException: raise @@ -809,13 +811,13 @@ def accept_invitation( if featureInstanceId: existingAccess = rootInterface.getFeatureAccess(str(currentUser.id), featureInstanceId) if existingAccess: - # Update existing access with additional roles + # Update existing access with additional roles. addRoleToFeatureAccess + # is already idempotent (returns silently when the role is already + # assigned), so any exception here is a real error and must be + # surfaced — not swallowed. featureAccessId = str(existingAccess.id) for roleId in roleIds: - try: - rootInterface.addRoleToFeatureAccess(str(existingAccess.id), roleId) - except Exception: - pass # Role might already be assigned + rootInterface.addRoleToFeatureAccess(str(existingAccess.id), roleId) message = "Roles updated for existing feature access" else: # Create feature access with instance-level roles @@ -828,14 +830,13 @@ def accept_invitation( featureAccessId = str(featureAccess.id) message = "Successfully joined feature instance" else: - # Legacy: mandate-only invitation (no feature instance) + # Legacy: mandate-only invitation (no feature instance). + # addRoleToUserMandate is already idempotent — any exception here + # is a real error (e.g. DB / FK constraint) and must propagate. existingMembership = rootInterface.getUserMandate(str(currentUser.id), mandateId) if existingMembership: for roleId in roleIds: - try: - rootInterface.addRoleToUserMandate(str(existingMembership.id), roleId) - except Exception: - pass + rootInterface.addRoleToUserMandate(str(existingMembership.id), roleId) message = "Roles updated for existing membership" else: rootInterface.createUserMandate( diff --git a/modules/routes/routeNotifications.py b/modules/routes/routeNotifications.py index 41d7fe26..c1cacb17 100644 --- a/modules/routes/routeNotifications.py +++ b/modules/routes/routeNotifications.py @@ -52,7 +52,7 @@ class UnreadCountResponse(BaseModel): # Helper Functions # ============================================================================= -def _createNotification( +def createNotification( userId: str, notificationType: NotificationType, title: str, @@ -103,7 +103,7 @@ def create_access_change_notification( Failures are logged only so RBAC mutations still succeed. """ try: - _createNotification( + createNotification( userId=userId, notificationType=NotificationType.SYSTEM, title=title, @@ -132,7 +132,7 @@ def createInvitationNotification( msg = f"{inviterName} hat Sie zur Feature-Instanz '{featureInstanceName}' eingeladen." else: msg = f"{inviterName} hat Sie zu '{mandateName}' eingeladen." - return _createNotification( + return createNotification( userId=userId, notificationType=NotificationType.INVITATION, title="Neue Einladung", diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index b6227cb0..807d5192 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -28,7 +28,7 @@ routeApiMsg = apiRouteContext("routeSecurityLocal") logger = logging.getLogger(__name__) -def _buildAuthEmailHtml( +def buildAuthEmailHtml( greeting: str, bodyLines: list, buttonText: str = None, @@ -118,7 +118,7 @@ def _buildAuthEmailHtml( ''' -def _sendAuthEmail(recipient: str, subject: str, message: str, userId: str = None, htmlOverride: str = None) -> bool: +def sendAuthEmail(recipient: str, subject: str, message: str, userId: str = None, htmlOverride: str = None) -> bool: """ Send authentication-related email directly without requiring full Services initialization. Used for registration, password reset, and other auth flows. @@ -128,7 +128,7 @@ def _sendAuthEmail(recipient: str, subject: str, message: str, userId: str = Non subject: Email subject message: Plain text fallback (ignored when htmlOverride is given) userId: Optional user ID for logging - htmlOverride: Pre-built branded HTML (from _buildAuthEmailHtml) + htmlOverride: Pre-built branded HTML (from buildAuthEmailHtml) Returns: bool: True if email was sent successfully @@ -486,7 +486,7 @@ def register_user( expiryHours = int(APP_CONFIG.get("Auth_RESET_TOKEN_EXPIRY_HOURS", "24")) emailSubject = "PowerOn Registrierung - Passwort setzen" - emailHtml = _buildAuthEmailHtml( + emailHtml = buildAuthEmailHtml( greeting=f"Hallo {user.fullName or user.username}", bodyLines=[ "Vielen Dank für Ihre Registrierung bei PowerOn.", @@ -500,7 +500,7 @@ def register_user( footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie sich nicht registriert haben, können Sie diese E-Mail ignorieren.", ) - emailSent = _sendAuthEmail( + emailSent = sendAuthEmail( recipient=user.email, subject=emailSubject, message="", @@ -787,7 +787,7 @@ def password_reset_request( # Send email using dedicated auth email function emailSubject = "PowerOn - Passwort zurücksetzen" - emailHtml = _buildAuthEmailHtml( + emailHtml = buildAuthEmailHtml( greeting=f"Hallo {user.fullName or user.username}", bodyLines=[ "Sie haben eine Passwort-Zurücksetzung für Ihren PowerOn Account angefordert.", @@ -801,7 +801,7 @@ def password_reset_request( footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie diese Anforderung nicht gestellt haben, können Sie diese E-Mail ignorieren.", ) - emailSent = _sendAuthEmail( + emailSent = sendAuthEmail( recipient=user.email, subject=emailSubject, message="", diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index b1f8dbc6..3419038c 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -227,7 +227,7 @@ def getSubscriptionInfo( } from modules.datamodels.datamodelSubscription import BUILTIN_PLANS - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot subInterface = _getSubRoot() allSubs = subInterface.listForMandate(mandateId) @@ -342,7 +342,7 @@ def activateStoreFeature( # ── 1. Resolve subscription & plan ────────────────────────────── from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS, SubscriptionStatusEnum - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot subInterface = _getSubRoot() operative = subInterface.getOperativeForMandate(mandateId) diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index dbdbe37d..9c8a7ed7 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -22,7 +22,7 @@ from pydantic import BaseModel, Field from modules.auth import limiter, getRequestContext, RequestContext from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict -from modules.routes.routeHelpers import _applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory +from modules.routes.routeHelpers import applyFiltersAndSort, handleFilterValuesInMemory, handleIdsInMemory from modules.shared.i18nRegistry import apiRouteContext, resolveText routeApiMsg = apiRouteContext("routeSubscription") @@ -46,25 +46,28 @@ def _resolveMandateId(context: RequestContext) -> str: def _assertMandateAdmin(context: RequestContext, mandateId: str) -> None: + """Authorize the caller as PlatformAdmin or admin of the given mandate. + + Fail-loud: a DB error during role lookup MUST NOT be silently masked as + "no permission" — that produces misleading 403s and hides infra outages. + Any unexpected exception propagates and surfaces as a 500. + """ if context.isPlatformAdmin: return - try: - from modules.interfaces.interfaceDbApp import getRootInterface - rootInterface = getRootInterface() - userMandates = rootInterface.getUserMandates(str(context.user.id)) - for um in userMandates: - if str(getattr(um, "mandateId", None)) != str(mandateId): - continue - if not getattr(um, "enabled", True): - continue - umId = str(getattr(um, "id", "")) - roleIds = rootInterface.getRoleIdsForUserMandate(umId) - for roleId in roleIds: - role = rootInterface.getRole(roleId) - if role and role.roleLabel == "admin" and not role.featureInstanceId: - return - except Exception: - pass + from modules.interfaces.interfaceDbApp import getRootInterface + rootInterface = getRootInterface() + userMandates = rootInterface.getUserMandates(str(context.user.id)) + for um in userMandates: + if str(getattr(um, "mandateId", None)) != str(mandateId): + continue + if not getattr(um, "enabled", True): + continue + umId = str(getattr(um, "id", "")) + roleIds = rootInterface.getRoleIdsForUserMandate(umId) + for roleId in roleIds: + role = rootInterface.getRole(roleId) + if role and role.roleLabel == "admin" and not role.featureInstanceId: + return raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Mandate admin role required")) @@ -309,7 +312,7 @@ def forceCancel( from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import ( getService as getSubscriptionService, ) - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface sub = getSubRootInterface().getById(data.subscriptionId) if not sub: raise HTTPException(status_code=404, detail=routeApiMsg("Subscription not found")) @@ -360,10 +363,10 @@ def verifyCheckout( if session.get("mode") != "subscription": raise HTTPException(status_code=400, detail=routeApiMsg("Not a subscription checkout session")) - from modules.routes.routeBilling import _handleSubscriptionCheckoutCompleted + from modules.routes.routeBilling import handleSubscriptionCheckoutCompleted try: - _handleSubscriptionCheckoutCompleted(session, f"verify-{data.sessionId}") + handleSubscriptionCheckoutCompleted(session, f"verify-{data.sessionId}") except Exception as e: logger.warning( "verifyCheckout: handler raised for session %s mandate %s: %s", @@ -383,7 +386,7 @@ def verifyCheckout( planKey = operative.get("planKey", "") if planKey: try: - from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot + from modules.interfaces.interfaceDbBilling import getRootInterface as _getBillingRoot _getBillingRoot().ensureActivationBudget(mandateId, planKey) except Exception as ex: logger.warning("verifyCheckout: ensureActivationBudget failed: %s", ex) @@ -398,23 +401,15 @@ def verifyCheckout( def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]: """Build the full enriched subscription list (shared by list + mode=filterValues).""" - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface from modules.datamodels.datamodelSubscription import BUILTIN_PLANS, OPERATIVE_STATUSES subInterface = getSubRootInterface() allSubs = subInterface.listAll() - mandateNames: Dict[str, str] = {} - try: - from modules.datamodels.datamodelUam import Mandate - from modules.security.rootAccess import getRootDbAppConnector - appDb = getRootDbAppConnector() - for row in appDb.getRecordset(Mandate): - r = dict(row) - mid = r.get("id", "") - mandateNames[mid] = r.get("label") or r.get("name") or mid[:8] - except Exception as e: - logger.warning("Could not bulk-resolve mandate names: %s", e) + from modules.routes.routeHelpers import resolveMandateLabels + allMandateIds = list({sub.get("mandateId") for sub in allSubs if sub.get("mandateId")}) + mandateNames: Dict[str, Optional[str]] = resolveMandateLabels(allMandateIds) if allMandateIds else {} operativeValues = {s.value for s in OPERATIVE_STATUSES} @@ -452,7 +447,7 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]: planKey = sub.get("planKey", "") plan = BUILTIN_PLANS.get(planKey) - sub["mandateName"] = mandateNames.get(mid, mid[:8]) + sub["mandateName"] = mandateNames.get(mid) sub["planTitle"] = resolveText(plan.title) if plan else planKey if sub.get("status") in operativeValues: @@ -507,7 +502,7 @@ def getAllSubscriptions( raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") enriched = _buildEnrichedSubscriptions() - filtered = _applyFiltersAndSort(enriched, paginationParams) + filtered = applyFiltersAndSort(enriched, paginationParams) if paginationParams: totalItems = len(filtered) @@ -547,7 +542,7 @@ def _getDataVolumeUsage( from modules.datamodels.datamodelFeatures import FeatureInstance from modules.interfaces.interfaceDbKnowledge import aggregateMandateRagTotalBytes from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRootIf + from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRootIf rootIf = getRootInterface() mandateId = targetMandateId diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py index 8ead1a6d..bf05f8c0 100644 --- a/modules/routes/routeSystem.py +++ b/modules/routes/routeSystem.py @@ -19,7 +19,7 @@ from slowapi import Limiter from slowapi.util import get_remote_address from modules.auth.authentication import getRequestContext, RequestContext -from modules.system.mainSystem import NAVIGATION_SECTIONS, _objectKeyToUiComponent +from modules.system.mainSystem import NAVIGATION_SECTIONS, objectKeyToUiComponent from modules.shared.i18nRegistry import resolveText, t from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceFeatures import getFeatureInterface @@ -455,7 +455,7 @@ def _buildStaticBlocks( def _formatBlockItem(item: Dict[str, Any]) -> Dict[str, Any]: """Format a navigation item for the API response.""" objectKey = item["objectKey"] - uiComponent = _objectKeyToUiComponent(objectKey) + uiComponent = objectKeyToUiComponent(objectKey) return { "uiComponent": uiComponent, diff --git a/modules/routes/routeVoiceUser.py b/modules/routes/routeVoiceUser.py index 4edbdf0d..7ddfbed4 100644 --- a/modules/routes/routeVoiceUser.py +++ b/modules/routes/routeVoiceUser.py @@ -14,7 +14,7 @@ from typing import Any, Dict from fastapi import APIRouter, Body, Depends, HTTPException, Query, Request, status from modules.auth import getCurrentUser, limiter -from modules.datamodels.datamodelUam import User, UserVoicePreferences, _normalizeTtsVoiceMap +from modules.datamodels.datamodelUam import User, UserVoicePreferences, normalizeTtsVoiceMap from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceVoiceObjects import getVoiceInterface from modules.shared.i18nRegistry import apiRouteContext @@ -83,7 +83,7 @@ def updateVoicePreferences( } updateData = {k: v for k, v in preferences.items() if k in allowedFields} if "ttsVoiceMap" in updateData: - updateData["ttsVoiceMap"] = _normalizeTtsVoiceMap(updateData["ttsVoiceMap"]) + updateData["ttsVoiceMap"] = normalizeTtsVoiceMap(updateData["ttsVoiceMap"]) if existing: existingRecord = existing[0] diff --git a/modules/routes/routeWorkflowDashboard.py b/modules/routes/routeWorkflowDashboard.py index 626f0872..998c47a7 100644 --- a/modules/routes/routeWorkflowDashboard.py +++ b/modules/routes/routeWorkflowDashboard.py @@ -154,6 +154,35 @@ def _userMayDeleteWorkflow(context: RequestContext, wfMandateId: Optional[str]) return wfMandateId in adminMandateIds +def _parsePaginationOr400(pagination: Optional[str]) -> Optional[PaginationParams]: + """Parse a JSON pagination query string into PaginationParams. + + Returns None when the input is empty/None. Raises HTTPException(400) on any + parse / validation error so the caller can propagate the error to the + client instead of silently falling back to defaults (which used to mask + real frontend bugs). + """ + if not pagination: + return None + try: + paginationDict = json.loads(pagination) + except json.JSONDecodeError as e: + raise HTTPException( + status_code=400, + detail=f"Invalid 'pagination' query: not valid JSON ({e.msg})", + ) + if not paginationDict: + return None + try: + paginationDict = normalize_pagination_dict(paginationDict) + return PaginationParams(**paginationDict) + except Exception as e: + raise HTTPException( + status_code=400, + detail=f"Invalid 'pagination' payload: {e}", + ) + + def _cascadeDeleteAutoWorkflow(db: DatabaseConnector, workflowId: str) -> None: """Delete AutoWorkflow and dependent rows (same order as interfaceDbApp._cascadeDeleteGraphicalEditorData).""" wf_id = workflowId @@ -218,16 +247,7 @@ def get_workflow_runs( if mandateId: recordFilter["mandateId"] = mandateId - paginationParams = None - if pagination: - try: - paginationDict = json.loads(pagination) - if paginationDict: - paginationDict = normalize_pagination_dict(paginationDict) - paginationParams = PaginationParams(**paginationDict) - except Exception: - pass - + paginationParams = _parsePaginationOr400(pagination) if not paginationParams: page = (offset // limit) + 1 if limit > 0 else 1 paginationParams = PaginationParams( @@ -252,30 +272,7 @@ def get_workflow_runs( for wf in (wfs or []): wfMap[wf.get("id")] = wf - mandateIds = list({r.get("mandateId") for r in pageRuns if r.get("mandateId")}) - instanceIds = list({ - wfMap[r.get("workflowId")].get("featureInstanceId") - for r in pageRuns - if r.get("workflowId") in wfMap and wfMap[r.get("workflowId")].get("featureInstanceId") - }) - - mandateLabelMap: dict = {} - instanceLabelMap: dict = {} - try: - rootIface = getRootInterface() - if mandateIds: - mMap = rootIface.getMandatesByIds(mandateIds) - for mid, m in mMap.items(): - mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid - if instanceIds: - from modules.interfaces.interfaceFeatures import getFeatureInterface - featureIface = getFeatureInterface(rootIface.db) - for iid in instanceIds: - fi = featureIface.getFeatureInstance(iid) - if fi: - instanceLabelMap[iid] = fi.label or iid - except Exception as e: - logger.warning(f"Failed to enrich run labels: {e}") + from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels runs = [] for r in pageRuns: @@ -286,14 +283,22 @@ def get_workflow_runs( row.get("label") or (wf.get("label") if isinstance(wf, dict) else None) or wfId - or "—" ) - row["mandateLabel"] = mandateLabelMap.get(row.get("mandateId"), row.get("mandateId") or "—") fiid = wf.get("featureInstanceId") if isinstance(wf, dict) else None row["featureInstanceId"] = fiid - row["instanceLabel"] = instanceLabelMap.get(fiid, fiid or "—") runs.append(row) + enrichRowsWithFkLabels( + runs, + labelResolvers={ + "mandateId": resolveMandateLabels, + "featureInstanceId": resolveInstanceLabels, + }, + ) + for row in runs: + row["instanceLabel"] = row.pop("featureInstanceIdLabel", None) + row["mandateLabel"] = row.pop("mandateIdLabel", None) + return {"runs": runs, "total": total, "limit": limit, "offset": offset} @@ -349,18 +354,15 @@ def get_workflow_metrics( totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems runsByStatus: dict = {} - try: - statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=runBaseFilter) - for sv in (statusValues or []): - statusFilter = dict(runBaseFilter) if runBaseFilter else {} - statusFilter["status"] = sv - sr = db.getRecordsetPaginated( - AutoRun, pagination=PaginationParams(page=1, pageSize=1), - recordFilter=statusFilter, - ) - runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems - except Exception as e: - logger.warning(f"Failed to compute runsByStatus: {e}") + statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=runBaseFilter) + for sv in (statusValues or []): + statusFilter = dict(runBaseFilter) if runBaseFilter else {} + statusFilter["status"] = sv + sr = db.getRecordsetPaginated( + AutoRun, pagination=PaginationParams(page=1, pageSize=1), + recordFilter=statusFilter, + ) + runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems totalTokens = 0 totalCredits = 0.0 @@ -425,16 +427,7 @@ def get_system_workflows( if mandateId: recordFilter["mandateId"] = mandateId - paginationParams = None - if pagination: - try: - paginationDict = json.loads(pagination) - if paginationDict: - paginationDict = normalize_pagination_dict(paginationDict) - paginationParams = PaginationParams(**paginationDict) - except Exception: - pass - + paginationParams = _parsePaginationOr400(pagination) if not paginationParams: paginationParams = PaginationParams( page=1, @@ -452,28 +445,25 @@ def get_system_workflows( totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages - mandateIds = list({w.get("mandateId") for w in pageItems if w.get("mandateId")}) - instanceIds = list({w.get("featureInstanceId") for w in pageItems if w.get("featureInstanceId")}) + from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels - mandateLabelMap: dict = {} - instanceLabelMap: dict = {} + # Resolve featureCode in same pass as instance labels — need full FI object featureCodeMap: dict = {} - try: - rootIface = getRootInterface() - if mandateIds: - mandateMap = rootIface.getMandatesByIds(mandateIds) - for mid, m in mandateMap.items(): - mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid - if instanceIds: - from modules.interfaces.interfaceFeatures import getFeatureInterface - featureIface = getFeatureInterface(rootIface.db) - for iid in instanceIds: - fi = featureIface.getFeatureInstance(iid) - if fi: - instanceLabelMap[iid] = fi.label or iid - featureCodeMap[iid] = fi.featureCode - except Exception as e: - logger.warning(f"Failed to enrich workflow labels: {e}") + def _resolveInstanceLabelsWithFeatureCode(ids): + from modules.interfaces.interfaceDbApp import getRootInterface as _getRI + from modules.interfaces.interfaceFeatures import getFeatureInterface + rootIf = _getRI() + featureIf = getFeatureInterface(rootIf.db) + result = {} + for iid in ids: + fi = featureIf.getFeatureInstance(iid) + if fi: + result[iid] = fi.label or None + featureCodeMap[iid] = fi.featureCode + else: + logger.warning("getSystemWorkflows: feature-instance not found for id=%s", iid) + result[iid] = None + return result userId = str(context.user.id) if context.user else None adminMandateIds = [] @@ -485,30 +475,23 @@ def get_system_workflows( activeRunMap: dict = {} runCountMap: dict = {} lastStartedMap: dict = {} - if workflowIds: - try: - if db._ensureTableExists(AutoRun): - for wfId in workflowIds: - runs = db.getRecordset(AutoRun, recordFilter={"workflowId": wfId}) - runCountMap[wfId] = len(runs) - for r in runs: - rDict = dict(r) - ts = rDict.get("sysCreatedAt") - if ts and (lastStartedMap.get(wfId) is None or ts > lastStartedMap.get(wfId)): - lastStartedMap[wfId] = ts - if rDict.get("status") in ("running", "paused"): - activeRunMap[wfId] = rDict.get("id") - except Exception as e: - logger.warning(f"Failed to enrich workflow run info: {e}") + if workflowIds and db._ensureTableExists(AutoRun): + for wfId in workflowIds: + runs = db.getRecordset(AutoRun, recordFilter={"workflowId": wfId}) + runCountMap[wfId] = len(runs) + for r in runs: + rDict = dict(r) + ts = rDict.get("sysCreatedAt") + if ts and (lastStartedMap.get(wfId) is None or ts > lastStartedMap.get(wfId)): + lastStartedMap[wfId] = ts + if rDict.get("status") in ("running", "paused"): + activeRunMap[wfId] = rDict.get("id") items = [] for w in pageItems: row = dict(w) wMandateId = row.get("mandateId") wfId = row.get("id") - row["mandateLabel"] = mandateLabelMap.get(wMandateId, wMandateId or "—") - row["instanceLabel"] = instanceLabelMap.get(row.get("featureInstanceId"), row.get("featureInstanceId") or "—") - row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId"), "") row["isRunning"] = wfId in activeRunMap row["activeRunId"] = activeRunMap.get(wfId) row["runCount"] = runCountMap.get(wfId, 0) @@ -528,9 +511,20 @@ def get_system_workflows( row["canExecute"] = False row.pop("graph", None) - items.append(row) + enrichRowsWithFkLabels( + items, + labelResolvers={ + "mandateId": resolveMandateLabels, + "featureInstanceId": _resolveInstanceLabelsWithFeatureCode, + }, + ) + for row in items: + row["instanceLabel"] = row.pop("featureInstanceIdLabel", None) + row["mandateLabel"] = row.pop("mandateIdLabel", None) + row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId")) + return { "items": items, "pagination": { @@ -572,15 +566,26 @@ def delete_system_workflow( try: _cascadeDeleteAutoWorkflow(db, workflowId) - try: - from modules.shared.callbackRegistry import callbackRegistry - callbackRegistry.trigger("graphicalEditor.workflow.changed") - except Exception: - pass except Exception as e: logger.error(f"delete_system_workflow cascade failed: {e}") raise HTTPException(status_code=500, detail=routeApiMsg(str(e))) + # Callback registry: log + propagate so listener bugs are visible. + # Cascade is already committed at this point — failure here is a side-effect + # bug (stale caches, missed notifications), never a "ignore silently" event. + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("graphicalEditor.workflow.changed") + except Exception as e: + logger.error( + f"delete_system_workflow: callbackRegistry.trigger failed for " + f"workflowId={workflowId}: {e}" + ) + raise HTTPException( + status_code=500, + detail=routeApiMsg(f"Workflow deleted but post-delete callback failed: {e}"), + ) + return {"success": True, "id": workflowId} @@ -591,18 +596,34 @@ def delete_system_workflow( def _enrichedFilterValues( db, context: RequestContext, modelClass, scopeFilter, column: str, ): - """Return distinct filter values (IDs) for FK columns or delegate to DB-level DISTINCT. - FK columns return raw IDs — the frontend resolves them to labels via fkCache. - Returns JSONResponse to bypass FastAPI response_model validation.""" + """Return distinct filter values for FormGeneratorTable column filters. + + For FK columns (mandateId, featureInstanceId) returns ``{value, label}`` + objects so the frontend can display human-readable labels in the dropdown + without a separate source fk fetch. Non-FK columns return ``string | null``. + + ``null`` is included when rows with NULL/empty values exist (enables the + "(Leer)" filter option). + + Returns JSONResponse to bypass FastAPI response_model validation. + """ from fastapi.responses import JSONResponse + from modules.routes.routeHelpers import resolveMandateLabels, resolveInstanceLabels + if column in ("mandateLabel", "mandateId"): baseFilter = scopeFilter(context) recordFilter = dict(baseFilter) if baseFilter else {} if modelClass == AutoWorkflow: recordFilter["isTemplate"] = False items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["mandateId"]) or [] - mandateIds = sorted({r.get("mandateId") for r in items if r.get("mandateId")}) - return JSONResponse(content=mandateIds) + allVals = {r.get("mandateId") for r in items} + mandateIds = sorted(v for v in allVals if v) + hasEmpty = None in allVals or "" in allVals + labelMap = resolveMandateLabels(mandateIds) if mandateIds else {} + result = [{"value": mid, "label": labelMap.get(mid) or f"NA({mid})"} for mid in mandateIds] + if hasEmpty: + result.append(None) + return JSONResponse(content=result) if column in ("instanceLabel", "featureInstanceId"): baseFilter = scopeFilter(context) @@ -610,15 +631,24 @@ def _enrichedFilterValues( if modelClass == AutoWorkflow: recordFilter["isTemplate"] = False items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["featureInstanceId"]) or [] - instanceIds = sorted({r.get("featureInstanceId") for r in items if r.get("featureInstanceId")}) + allVals = {r.get("featureInstanceId") for r in items} + instanceIds = sorted(v for v in allVals if v) + hasEmpty = None in allVals or "" in allVals else: items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId"]) or [] wfIds = list({r.get("workflowId") for r in items if r.get("workflowId")}) instanceIds = [] + hasEmpty = False if wfIds and db._ensureTableExists(AutoWorkflow): wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds}, fieldFilter=["featureInstanceId"]) or [] - instanceIds = sorted({w.get("featureInstanceId") for w in wfs if w.get("featureInstanceId")}) - return JSONResponse(content=instanceIds) + allVals = {w.get("featureInstanceId") for w in wfs} + instanceIds = sorted(v for v in allVals if v) + hasEmpty = None in allVals or "" in allVals + labelMap = resolveInstanceLabels(instanceIds) if instanceIds else {} + result = [{"value": iid, "label": labelMap.get(iid) or f"NA({iid})"} for iid in instanceIds] + if hasEmpty: + result.append(None) + return JSONResponse(content=result) if column == "workflowLabel": baseFilter = scopeFilter(context) @@ -626,9 +656,12 @@ def _enrichedFilterValues( items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId", "label"]) or [] labels = set() wfIds = set() + hasEmpty = False for r in items: if r.get("label"): labels.add(r["label"]) + elif not r.get("workflowId"): + hasEmpty = True if r.get("workflowId"): wfIds.add(r["workflowId"]) if wfIds and db._ensureTableExists(AutoWorkflow): @@ -636,7 +669,10 @@ def _enrichedFilterValues( for wf in wfs: if wf.get("label"): labels.add(wf["label"]) - return JSONResponse(content=sorted(labels, key=lambda v: v.lower())) + result = sorted(labels, key=lambda v: v.lower()) + if hasEmpty: + result.append(None) + return JSONResponse(content=result) baseFilter = scopeFilter(context) recordFilter = dict(baseFilter) if baseFilter else {} diff --git a/modules/serviceCenter/services/serviceAgent/agentLoop.py b/modules/serviceCenter/services/serviceAgent/agentLoop.py index d458ee27..1c930e26 100644 --- a/modules/serviceCenter/services/serviceAgent/agentLoop.py +++ b/modules/serviceCenter/services/serviceAgent/agentLoop.py @@ -592,7 +592,7 @@ _DATA_SOURCE_TOOLS = {"browseDataSource", "searchDataSource", "downloadFromDataS _DECISION_TOOLS = {"writeFile", "replaceInFile"} -def _classifyToolResult( +def classifyToolResult( tc: ToolCallRequest, result: ToolResult ) -> Optional[Dict[str, Any]]: """Classify a successful tool result into a RoundMemory dict. diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index fb54199e..61c00ed0 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -578,7 +578,7 @@ class AgentService: def _createPersistRoundMemoryFn(self, workflowId: str): """Create callback that persists RoundMemory entries after tool execution.""" - from modules.serviceCenter.services.serviceAgent.agentLoop import _classifyToolResult + from modules.serviceCenter.services.serviceAgent.agentLoop import classifyToolResult from modules.datamodels.datamodelKnowledge import RoundMemory async def _persistRoundMemory( @@ -593,7 +593,7 @@ class AgentService: for tc, result in zip(toolCalls, results): if not result.success: continue - classified = _classifyToolResult(tc, result) + classified = classifyToolResult(tc, result) if not classified: continue diff --git a/modules/serviceCenter/services/serviceExtraction/subPipeline.py b/modules/serviceCenter/services/serviceExtraction/subPipeline.py index ab14fddb..b76578ed 100644 --- a/modules/serviceCenter/services/serviceExtraction/subPipeline.py +++ b/modules/serviceCenter/services/serviceExtraction/subPipeline.py @@ -4,7 +4,7 @@ from typing import List import logging from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart, ExtractionOptions, MergeStrategy -from modules.datamodels.datamodelUdm import _applyUdmOutputDetail +from modules.datamodels.datamodelUdm import applyUdmOutputDetail from .subUtils import makeId from .subRegistry import ExtractorRegistry, ChunkerRegistry @@ -54,7 +54,7 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker {**extractCtx, "extractionId": ec_id}, precomputedParts=parts, ) - extracted.udm = _applyUdmOutputDetail(udm, options.outputDetail) + extracted.udm = applyUdmOutputDetail(udm, options.outputDetail) return extracted diff --git a/modules/serviceCenter/services/serviceExtraction/subRegistry.py b/modules/serviceCenter/services/serviceExtraction/subRegistry.py index 9412ef91..422b4b50 100644 --- a/modules/serviceCenter/services/serviceExtraction/subRegistry.py +++ b/modules/serviceCenter/services/serviceExtraction/subRegistry.py @@ -47,15 +47,15 @@ class Extractor: precomputedParts: Optional[List[ContentPart]] = None, ) -> "UdmDocument": """Build UDM from extracted parts (default: heuristic grouping). Override for format-specific trees.""" - from modules.datamodels.datamodelUdm import _contentPartsToUdm, _mimeToUdmSourceType + from modules.datamodels.datamodelUdm import contentPartsToUdm, mimeToUdmSourceType from modules.datamodels.datamodelExtraction import ContentExtracted from .subUtils import makeId parts = precomputedParts if precomputedParts is not None else self.extract(fileBytes, context) eid = context.get("extractionId") or makeId() extracted = ContentExtracted(id=eid, parts=parts) - src = _mimeToUdmSourceType(context.get("mimeType", ""), context.get("fileName", "")) - return _contentPartsToUdm(extracted, src, context.get("fileName", "")) + src = mimeToUdmSourceType(context.get("mimeType", ""), context.get("fileName", "")) + return contentPartsToUdm(extracted, src, context.get("fileName", "")) def getSupportedExtensions(self) -> list[str]: """Return list of supported file extensions (including dots).""" diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index 378c83cf..dab8cc25 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -253,9 +253,9 @@ class KnowledgeService: ) if resolvedMandateId: try: - from modules.interfaces.interfaceDbBilling import _getRootInterface + from modules.interfaces.interfaceDbBilling import getRootInterface - _getRootInterface().reconcileMandateStorageBilling(str(resolvedMandateId)) + getRootInterface().reconcileMandateStorageBilling(str(resolvedMandateId)) except Exception as ex: logger.warning("reconcileMandateStorageBilling after index failed: %s", ex) return index diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py index c9ba5f54..6c47b725 100644 --- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py +++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -19,7 +19,7 @@ from modules.datamodels.datamodelSubscription import ( SubscriptionStatusEnum, BillingPeriodEnum, OPERATIVE_STATUSES, - _getPlan, + getPlan, _getSelectablePlans, ) from modules.interfaces.interfaceDbSubscription import ( @@ -117,7 +117,7 @@ class SubscriptionService: return _getSelectablePlans() def getPlan(self, planKey: str) -> Optional[SubscriptionPlan]: - return _getPlan(planKey) + return getPlan(planKey) # ========================================================================= # T1/T2: Plan activation (creates PENDING, returns checkout URL) @@ -132,7 +132,7 @@ class SubscriptionService: Cleans up any existing PENDING/SCHEDULED for this mandate first (by ID).""" mid = mandateId or self.mandateId - plan = _getPlan(planKey) + plan = getPlan(planKey) if not plan: raise ValueError(f"Unknown plan: {planKey}") @@ -488,7 +488,7 @@ class SubscriptionService: result = self._interface.updateFields(subscriptionId, {"recurring": False}) self.invalidateCache(mandateId) - plan = _getPlan(sub.get("planKey", "")) + plan = getPlan(sub.get("planKey", "")) _notifySubscriptionChange(mandateId, "cancelled", plan, subscriptionRecord=sub, platformUrl=pUrl) return result @@ -554,7 +554,7 @@ class SubscriptionService: mandateId = sub["mandateId"] self.invalidateCache(mandateId) - plan = _getPlan(sub.get("planKey", "")) + plan = getPlan(sub.get("planKey", "")) _notifySubscriptionChange(mandateId, "force_cancelled", plan, subscriptionRecord=sub, platformUrl=pUrl) return result @@ -573,8 +573,8 @@ class SubscriptionService: ) self.invalidateCache(sub["mandateId"]) - plan = _getPlan(sub.get("planKey", "")) - successorPlan = _getPlan(plan.successorPlanKey) if plan and plan.successorPlanKey else None + plan = getPlan(sub.get("planKey", "")) + successorPlan = getPlan(plan.successorPlanKey) if plan and plan.successorPlanKey else None _notifySubscriptionChange(sub["mandateId"], "trial_expired", successorPlan) logger.info("Trial expired for subscription %s", subscriptionId) @@ -690,7 +690,7 @@ def _buildInvoiceSummaryHtml( ) -> str: """Build an HTML invoice summary block for inclusion in the activation email.""" import html as htmlmod - from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface + from modules.interfaces.interfaceDbSubscription import getRootInterface as getSubRootInterface subInterface = getSubRootInterface() userCount = subInterface.countActiveUsers(mandateId) diff --git a/modules/shared/aiAuditLogger.py b/modules/shared/aiAuditLogger.py index fbcc6045..04255ce1 100644 --- a/modules/shared/aiAuditLogger.py +++ b/modules/shared/aiAuditLogger.py @@ -31,7 +31,7 @@ class AiAuditLogger: if self._initialized: return try: - from modules.connectors.connectorDbPostgre import _get_cached_connector + from modules.connectors.connectorDbPostgre import getCachedConnector from modules.shared.configuration import APP_CONFIG from modules.datamodels.datamodelAiAudit import AiAuditLogEntry @@ -40,7 +40,7 @@ class AiAuditLogger: dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) - self._db = _get_cached_connector( + self._db = getCachedConnector( dbHost=dbHost, dbDatabase="poweron_app", dbUser=dbUser, diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py index d6228854..f7e432bc 100644 --- a/modules/shared/attributeUtils.py +++ b/modules/shared/attributeUtils.py @@ -33,9 +33,9 @@ class AttributeDefinition(BaseModel): visible: bool = True order: int = 0 placeholder: Optional[str] = None - fkSource: Optional[str] = None - fkDisplayField: Optional[str] = None - fkModel: Optional[str] = None # DB table / Pydantic model name for server-side FK sort (JOIN) + # Backend adds ``{name}Label`` on rows; FormGeneratorTable reads ``displayField`` (e.g. ``userId`` → ``userIdLabel``). + displayField: Optional[str] = None + fkModel: Optional[str] = None # Pydantic / resolver name (Mandate, User, …) for server-side FK sort + label enrichment # ------------------------------------------------------------------ # Render hints for the frontend FormGenerator / Tables. # ``frontendFormat`` is an Excel-style format string the FE applies to numeric, @@ -104,6 +104,16 @@ def _mergedAttributeLabels(modelClass: Type[BaseModel]) -> Dict[str, str]: return merged +def _mergedFieldJsonExtra(field) -> Dict[str, Any]: + """Merge Pydantic FieldInfo.extra and json_schema_extra (subclass fields override).""" + merged: Dict[str, Any] = {} + if hasattr(field, "extra") and isinstance(field.extra, dict): + merged.update(field.extra) + if hasattr(field, "json_schema_extra") and isinstance(field.json_schema_extra, dict): + merged.update(field.json_schema_extra) + return merged + + def getModelLabel(modelName: str) -> str: """Get the label for a model via resolveText().""" modelData = _getModelLabelEntry(modelName) @@ -145,9 +155,6 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag frontend_required = field.is_required() frontend_options = None frontend_visible = True # Default visible - frontend_fk_source = None # FK dropdown source (e.g., "/api/users/") - frontend_fk_display_field = None # Which field of the FK target to display (e.g., "username", "name") - fk_model = None # Same as fk_model in json_schema_extra — backend JOIN target table name # Render hints (cf. AttributeDefinition.frontendFormat / frontendFormatLabels). # Optional Excel-like format string ("R:#'###.00") plus translatable label tokens # for boolean/categorical render (e.g. ["Ja","-","Nein"] resolved via @i18nModel). @@ -203,14 +210,6 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag # Extract frontend_visible (default True, can be set to False to hide field) if "frontend_visible" in json_extra: frontend_visible = json_extra.get("frontend_visible", True) - # Extract frontend_fk_source for FK dropdown references - if "frontend_fk_source" in json_extra: - frontend_fk_source = json_extra.get("frontend_fk_source") - # Extract frontend_fk_display_field - which field of FK target to display - if "frontend_fk_display_field" in json_extra: - frontend_fk_display_field = json_extra.get("frontend_fk_display_field") - if "fk_model" in json_extra: - fk_model = json_extra.get("fk_model") if frontend_format is None and "frontend_format" in json_extra: frontend_format = json_extra.get("frontend_format") if frontend_format_labels is None and "frontend_format_labels" in json_extra: @@ -273,7 +272,6 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag pass # Hide "id" fields by default unless explicitly set to visible - # Also hide fields ending with "Id" that are FK references (unless they have fkSource) if name == "id": frontend_visible = False # Never show primary key in forms/tables @@ -291,15 +289,17 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag "options": _resolveOptionLabels(frontend_options), "default": field_default, } - - # Add FK source for dropdown rendering if specified - if frontend_fk_source: - attr_def["fkSource"] = frontend_fk_source - # Also add display field if specified (which field of FK target to show) - if frontend_fk_display_field: - attr_def["fkDisplayField"] = frontend_fk_display_field - if fk_model: - attr_def["fkModel"] = fk_model + + mergedExtra = _mergedFieldJsonExtra(field) + fkModelName = mergedExtra.get("fk_model") + fkTarget = mergedExtra.get("fk_target") + if not fkModelName and isinstance(fkTarget, dict) and fkTarget.get("table"): + fkModelName = fkTarget.get("table") + hasFk = bool(fkModelName) or (isinstance(fkTarget, dict) and bool(fkTarget.get("table"))) + if hasFk: + attr_def["displayField"] = f"{name}Label" + if fkModelName: + attr_def["fkModel"] = fkModelName # Render hints (Excel-like format string + i18n-resolved label tokens). # Labels are resolved server-side via resolveText() so the FE renders them diff --git a/modules/shared/dbRegistry.py b/modules/shared/dbRegistry.py index 057e27f8..4626a100 100644 --- a/modules/shared/dbRegistry.py +++ b/modules/shared/dbRegistry.py @@ -39,7 +39,7 @@ def registerDatabase(dbName: str, configPrefix: str = "DB") -> None: logger.debug(f"Database registered: {dbName} (configPrefix={configPrefix})") -def _getRegisteredDatabases() -> Dict[str, str]: +def getRegisteredDatabases() -> Dict[str, str]: """Return snapshot of all registered databases {dbName: configPrefix}.""" with _lock: return dict(_registry) diff --git a/modules/shared/debugLogger.py b/modules/shared/debugLogger.py index 3c5e4135..d1b22abc 100644 --- a/modules/shared/debugLogger.py +++ b/modules/shared/debugLogger.py @@ -19,7 +19,7 @@ def _resolveLogDir() -> str: logDir = os.path.join(gatewayDir, logDir) return logDir -def _ensureDir(path: str) -> None: +def ensureDir(path: str) -> None: """Create directory if it does not exist.""" os.makedirs(path, exist_ok=True) @@ -27,7 +27,7 @@ def _isDebugEnabled() -> bool: """Check if debug workflow logging is enabled.""" return APP_CONFIG.get("APP_DEBUG_CHAT_WORKFLOW_ENABLED", False) -def _getBaseDebugDir() -> str: +def getBaseDebugDir() -> str: """Get the base debug directory path from configuration.""" # Check if custom debug directory is configured customDebugDir = APP_CONFIG.get("APP_DEBUG_CHAT_WORKFLOW_DIR", None) @@ -47,7 +47,7 @@ def _getBaseDebugDir() -> str: def _getDebugDir() -> str: """Get the debug prompts directory path from configuration.""" - baseDebugDir = _getBaseDebugDir() + baseDebugDir = getBaseDebugDir() return os.path.join(baseDebugDir, 'prompts') def _getNextSequenceNumber() -> int: @@ -79,7 +79,7 @@ def writeDebugFile(content: str, fileType: str, documents: Optional[List] = None return debugDir = _getDebugDir() - _ensureDir(debugDir) + ensureDir(debugDir) seqNum = _getNextSequenceNumber() ts = datetime.now(UTC).strftime('%Y%m%d-%H%M%S') @@ -128,8 +128,8 @@ def debugLogToFile(message: str, context: str = "DEBUG") -> None: return # Get debug directory (use base debug dir, not prompts subdirectory) - debug_dir = _getBaseDebugDir() - _ensureDir(debug_dir) + debug_dir = getBaseDebugDir() + ensureDir(debug_dir) # Create debug file path debug_file = os.path.join(debug_dir, "debug_workflow.log") diff --git a/modules/shared/fkRegistry.py b/modules/shared/fkRegistry.py index ccf68666..9772b8a6 100644 --- a/modules/shared/fkRegistry.py +++ b/modules/shared/fkRegistry.py @@ -14,8 +14,8 @@ for the *target* side. By collecting all such declarations we know which DB each table lives in — no extra registration step needed. Usage: - from modules.shared.fkRegistry import _getFkRelationships - rels = _getFkRelationships() + from modules.shared.fkRegistry import getFkRelationships + rels = getFkRelationships() """ import importlib @@ -25,7 +25,7 @@ import threading from dataclasses import dataclass from typing import Dict, List, Optional -from modules.datamodels.datamodelBase import _MODEL_REGISTRY +from modules.datamodels.datamodelBase import MODEL_REGISTRY logger = logging.getLogger(__name__) @@ -33,7 +33,7 @@ _modelsLoaded = False def _ensureModelsLoaded() -> None: - """Import all datamodel modules so that __init_subclass__ fills _MODEL_REGISTRY. + """Import all datamodel modules so that __init_subclass__ fills MODEL_REGISTRY. In a running server the interfaces import the datamodels automatically. This function makes FK-Discovery work in standalone / test contexts too. @@ -96,7 +96,7 @@ def _buildTableToDbMap() -> Dict[str, str]: _ensureModelsLoaded() mapping: Dict[str, str] = {} - for modelCls in _MODEL_REGISTRY.values(): + for modelCls in MODEL_REGISTRY.values(): for fieldInfo in modelCls.model_fields.values(): extra = fieldInfo.json_schema_extra if not isinstance(extra, dict): @@ -109,11 +109,11 @@ def _buildTableToDbMap() -> Dict[str, str]: if table and db: mapping[table] = db - unmapped = [name for name in _MODEL_REGISTRY if name not in mapping] + unmapped = [name for name in MODEL_REGISTRY if name not in mapping] if unmapped: try: - from modules.shared.dbRegistry import _getRegisteredDatabases - _resolveUnmappedTablesFromCatalog(mapping, unmapped, _getRegisteredDatabases()) + from modules.shared.dbRegistry import getRegisteredDatabases + _resolveUnmappedTablesFromCatalog(mapping, unmapped, getRegisteredDatabases()) except Exception as e: logger.warning(f"Could not resolve unmapped tables from catalog: {e}") @@ -175,7 +175,7 @@ def _discoverFkRelationships() -> List[FkRelationship]: tableToDb = _buildTableToDbMap() relationships: List[FkRelationship] = [] - for tableName, modelCls in _MODEL_REGISTRY.items(): + for tableName, modelCls in MODEL_REGISTRY.items(): sourceDb = tableToDb.get(tableName) if sourceDb is None: continue @@ -211,7 +211,7 @@ def _discoverFkRelationships() -> List[FkRelationship]: return relationships -def _getFkRelationships() -> List[FkRelationship]: +def getFkRelationships() -> List[FkRelationship]: """Return the cached list of FK relationships (discovered on first call).""" global _cachedRelationships with _lock: diff --git a/modules/shared/i18nRegistry.py b/modules/shared/i18nRegistry.py index d6bbd1e2..f32315d6 100644 --- a/modules/shared/i18nRegistry.py +++ b/modules/shared/i18nRegistry.py @@ -87,7 +87,7 @@ class _I18nRegistryEntry: _REGISTRY: Dict[str, _I18nRegistryEntry] = {} # --------------------------------------------------------------------------- -# Translation cache (populated at boot by _loadCache) +# Translation cache (populated at boot by loadCache) # --------------------------------------------------------------------------- _CACHE: Dict[str, Dict[str, str]] = {} @@ -245,7 +245,7 @@ def _extractDocstringFirstLine(cls: type) -> str: # Language setter (called by middleware) # --------------------------------------------------------------------------- -def _setLanguage(lang: str): +def setLanguage(lang: str): """Set the language for the current request context.""" _CURRENT_LANGUAGE.set(lang) @@ -558,13 +558,13 @@ def _registerAccountingConnectorLabels(): """ added = 0 try: - from modules.features.trustee.accounting.accountingRegistry import _getAccountingRegistry + from modules.features.trustee.accounting.accountingRegistry import getAccountingRegistry except ImportError: logger.debug("i18n accounting connectors: registry not importable") return try: - registry = _getAccountingRegistry() + registry = getAccountingRegistry() except Exception as e: logger.warning("i18n accounting connectors: registry init failed: %s", e) return @@ -650,7 +650,7 @@ def _registerDatamodelOptionLabels(): # Boot: sync registry to DB # --------------------------------------------------------------------------- -async def _syncRegistryToDb(): +async def syncRegistryToDb(): """Boot hook: write all registered keys into UiLanguageSet(xx). 1. Scans route files for routeApiMsg("…") to eagerly register api.* keys. @@ -674,10 +674,10 @@ async def _syncRegistryToDb(): from modules.datamodels.datamodelUiLanguage import UiLanguageSet from modules.shared.configuration import APP_CONFIG - from modules.connectors.connectorDbPostgre import _get_cached_connector + from modules.connectors.connectorDbPostgre import getCachedConnector from modules.shared.timeUtils import getUtcTimestamp - db = _get_cached_connector( + db = getCachedConnector( dbHost=APP_CONFIG.get("DB_HOST", "localhost"), dbDatabase="poweron_management", dbUser=APP_CONFIG.get("DB_USER"), @@ -764,7 +764,7 @@ async def _syncRegistryToDb(): # Boot: load translation cache # --------------------------------------------------------------------------- -async def _loadCache(): +async def loadCache(): """Boot hook: load all UiLanguageSets into the in-memory cache. Also persistently repairs placeholder mismatches in the DB: @@ -778,9 +778,9 @@ async def _loadCache(): """ from modules.datamodels.datamodelUiLanguage import UiLanguageSet from modules.shared.configuration import APP_CONFIG - from modules.connectors.connectorDbPostgre import _get_cached_connector + from modules.connectors.connectorDbPostgre import getCachedConnector - db = _get_cached_connector( + db = getCachedConnector( dbHost=APP_CONFIG.get("DB_HOST", "localhost"), dbDatabase="poweron_management", dbUser=APP_CONFIG.get("DB_USER"), diff --git a/modules/shared/notifyMandateAdmins.py b/modules/shared/notifyMandateAdmins.py index 6bef921d..6ac6fa53 100644 --- a/modules/shared/notifyMandateAdmins.py +++ b/modules/shared/notifyMandateAdmins.py @@ -109,7 +109,7 @@ def _resolveAllRecipients(mandateId: str) -> List[str]: # ============================================================================ -def _resolveMandateName(mandateId: str) -> str: +def resolveMandateName(mandateId: str) -> str: """Return the human-readable mandate name (label or name), falling back to a short ID.""" try: from modules.datamodels.datamodelUam import Mandate @@ -141,7 +141,7 @@ def _getOperatorInfo() -> Dict[str, str]: return {"companyName": "", "address": "", "vatNumber": ""} -def _renderHtmlEmail( +def renderHtmlEmail( headline: str, bodyParagraphs: List[str], mandateName: str, @@ -256,8 +256,8 @@ def notifyMandateAdmins( ) return 0 - mandateName = _resolveMandateName(mandateId) - htmlMessage = _renderHtmlEmail(headline, bodyParagraphs, mandateName, footerNote, rawHtmlBlock) + mandateName = resolveMandateName(mandateId) + htmlMessage = renderHtmlEmail(headline, bodyParagraphs, mandateName, footerNote, rawHtmlBlock) messaging = getMessagingInterface() successCount = 0 diff --git a/modules/shared/timeUtils.py b/modules/shared/timeUtils.py index 79dcd762..0c7b04f1 100644 --- a/modules/shared/timeUtils.py +++ b/modules/shared/timeUtils.py @@ -36,7 +36,7 @@ _DEFAULT_REQUEST_TZ = "UTC" _CURRENT_TIMEZONE: ContextVar[str] = ContextVar("user_tz", default=_DEFAULT_REQUEST_TZ) -def _setRequestTimezone(tzName: str) -> None: +def setRequestTimezone(tzName: str) -> None: """Set the current request's user timezone (called by gateway middleware). Validates against zoneinfo; falls back to UTC for unknown/invalid names so diff --git a/modules/system/databaseHealth.py b/modules/system/databaseHealth.py index 7a968f7f..8a902e5f 100644 --- a/modules/system/databaseHealth.py +++ b/modules/system/databaseHealth.py @@ -16,8 +16,8 @@ import psycopg2 import psycopg2.extras from modules.shared.configuration import APP_CONFIG -from modules.shared.dbRegistry import _getRegisteredDatabases -from modules.shared.fkRegistry import _getFkRelationships, FkRelationship +from modules.shared.dbRegistry import getRegisteredDatabases +from modules.shared.fkRegistry import getFkRelationships, FkRelationship logger = logging.getLogger(__name__) @@ -94,7 +94,7 @@ class OrphanCleanupRefused(Exception): def _getConnection(dbName: str): """Open a psycopg2 connection to the given registered database.""" - registeredDbs = _getRegisteredDatabases() + registeredDbs = getRegisteredDatabases() configPrefix = registeredDbs.get(dbName) if configPrefix is None: raise ValueError(f"Database '{dbName}' is not registered.") @@ -126,7 +126,7 @@ def _getTableStats(dbFilter: Optional[str] = None) -> List[dict]: Returns a list of TableStats dicts, optionally filtered by database name. """ - registeredDbs = _getRegisteredDatabases() + registeredDbs = getRegisteredDatabases() if dbFilter: registeredDbs = {k: v for k, v in registeredDbs.items() if k == dbFilter} @@ -297,7 +297,7 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]: return [r for r in cached if r["sourceDb"] == dbFilter] return list(cached) - relationships = _getFkRelationships() + relationships = getFkRelationships() if dbFilter: relationships = [r for r in relationships if r.sourceDb == dbFilter] @@ -450,7 +450,7 @@ def _cleanOrphans(db: str, table: str, column: str, force: bool = False) -> int: These guards prevent catastrophic wipes (e.g. emptying FeatureInstance because the User table happened to be empty in the wrong DB at scan time). """ - relationships = _getFkRelationships() + relationships = getFkRelationships() rel = next( (r for r in relationships if r.sourceDb == db and r.sourceTable == table and r.sourceColumn == column), @@ -643,7 +643,7 @@ def _listOrphans( the SysAdmin UI can present them as a download (CSV/JSON) for review before the destructive cleanup is triggered. """ - relationships = _getFkRelationships() + relationships = getFkRelationships() rel = next( (r for r in relationships if r.sourceDb == db and r.sourceTable == table and r.sourceColumn == column), diff --git a/modules/system/mainSystem.py b/modules/system/mainSystem.py index 53405683..b7e45006 100644 --- a/modules/system/mainSystem.py +++ b/modules/system/mainSystem.py @@ -359,7 +359,7 @@ NAVIGATION_SECTIONS = [ ] -def _objectKeyToUiComponent(objectKey: str) -> str: +def objectKeyToUiComponent(objectKey: str) -> str: """ Convert objectKey to uiComponent. diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py index b5b5b754..1d0ca5c8 100644 --- a/modules/workflows/automation2/executionEngine.py +++ b/modules/workflows/automation2/executionEngine.py @@ -26,7 +26,7 @@ from modules.workflows.automation2.executors import ( PauseForHumanTaskError, PauseForEmailWaitError, ) -from modules.features.graphicalEditor.portTypes import _normalizeToSchema +from modules.features.graphicalEditor.portTypes import normalizeToSchema from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError @@ -102,11 +102,11 @@ def _allMergePredecessorsReady( def _normalizeResult(result: Any, nodeType: str) -> Any: - """Apply _normalizeToSchema if the node has a declared output schema.""" + """Apply normalizeToSchema if the node has a declared output schema.""" schema = _outputSchemaForNode(nodeType) if schema and schema != "Transit" and isinstance(result, dict): try: - return _normalizeToSchema(result, schema) + return normalizeToSchema(result, schema) except Exception as e: logger.warning(f"_normalizeResult failed for nodeType={nodeType}, schema={schema}: {e}") return result @@ -375,7 +375,7 @@ async def executeGraph( schema = _outputSchemaForNode(resumedType) if schema and schema != "Transit": try: - initialNodeOutputs[startAfterNodeId] = _normalizeToSchema(resumedOutput, schema) + initialNodeOutputs[startAfterNodeId] = normalizeToSchema(resumedOutput, schema) except Exception as valErr: logger.warning("executeGraph resume: schema validation failed for %s: %s", startAfterNodeId, valErr) if not runId and automation2_interface and workflowId and not is_resume: @@ -818,8 +818,8 @@ async def executeGraph( ) if _wfObj else {} _shouldNotify = _wfDict.get("notifyOnFailure", True) if _wfDict else True if _shouldNotify: - from modules.workflows.scheduler.mainScheduler import _notifyRunFailed - _notifyRunFailed( + from modules.workflows.scheduler.mainScheduler import notifyRunFailed + notifyRunFailed( workflowId or "", runId or "", str(e), mandateId=mandateId, workflowLabel=_wfDict.get("label"), diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py index d9fc99a7..6162aa2d 100644 --- a/modules/workflows/automation2/executors/actionNodeExecutor.py +++ b/modules/workflows/automation2/executors/actionNodeExecutor.py @@ -11,7 +11,7 @@ from typing import Any, Dict, Optional from modules.features.graphicalEditor.portTypes import ( _normalizeError, - _normalizeToSchema, + normalizeToSchema, ) from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError @@ -407,7 +407,7 @@ class ActionNodeExecutor: "count": int(data_dict.get("count", 0)), } _attachConnectionProvenance(cr_out, resolvedParams, outputSchema, chatService, self.services) - return _normalizeToSchema(cr_out, outputSchema) + return normalizeToSchema(cr_out, outputSchema) _attachConnectionProvenance(out, resolvedParams, outputSchema, chatService, self.services) - return _normalizeToSchema(out, outputSchema) + return normalizeToSchema(out, outputSchema) diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py index 5a33f9e2..ef205590 100644 --- a/modules/workflows/automation2/executors/dataExecutor.py +++ b/modules/workflows/automation2/executors/dataExecutor.py @@ -4,7 +4,7 @@ import logging from typing import Any, Dict -from modules.features.graphicalEditor.portTypes import _unwrapTransit, _wrapTransit +from modules.features.graphicalEditor.portTypes import unwrapTransit, wrapTransit logger = logging.getLogger(__name__) @@ -52,7 +52,7 @@ class DataExecutor: if inp is None: return {"items": [], "count": 0, "_success": True} - data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + data = unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp if mode == "collect": items = [data] if data is not None else [] @@ -77,7 +77,7 @@ class DataExecutor: ) -> Any: """Filter items by condition expression and/or UDM content type. Returns Transit envelope.""" inp = self._getInput(inputSources, nodeOutputs) - data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + data = unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp params = node.get("parameters") or {} condition = params.get("condition", "") udmContentType = params.get("udmContentType", "") @@ -102,7 +102,7 @@ class DataExecutor: elif isinstance(data, list): filteredData = filtered - return _wrapTransit(filteredData, { + return wrapTransit(filteredData, { "originalCount": originalCount, "filteredCount": len(filtered), }) @@ -116,7 +116,7 @@ class DataExecutor: ) -> Any: """Deterministic consolidation: table, concat, merge, csvJoin.""" inp = self._getInput(inputSources, nodeOutputs) - data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + data = unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp params = node.get("parameters") or {} mode = params.get("mode", "table") separator = params.get("separator", "\n") diff --git a/modules/workflows/automation2/executors/flowExecutor.py b/modules/workflows/automation2/executors/flowExecutor.py index de19d9a7..511be6ff 100644 --- a/modules/workflows/automation2/executors/flowExecutor.py +++ b/modules/workflows/automation2/executors/flowExecutor.py @@ -4,7 +4,7 @@ import logging from typing import Any, Dict -from modules.features.graphicalEditor.portTypes import _wrapTransit, _unwrapTransit +from modules.features.graphicalEditor.portTypes import wrapTransit, unwrapTransit logger = logging.getLogger(__name__) @@ -68,8 +68,8 @@ class FlowExecutor: condParam = (node.get("parameters") or {}).get("condition") inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) ok = self._evalConditionParam(condParam, nodeOutputs) - return _wrapTransit( - _unwrapTransit(inp) if inp else inp, + return wrapTransit( + unwrapTransit(inp) if inp else inp, {"branch": 0 if ok else 1, "conditionResult": ok}, ) @@ -215,12 +215,12 @@ class FlowExecutor: inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) for i, c in enumerate(cases): if self._evalSwitchCase(value, c): - return _wrapTransit( - _unwrapTransit(inp) if inp else inp, + return wrapTransit( + unwrapTransit(inp) if inp else inp, {"match": i, "value": value}, ) - return _wrapTransit( - _unwrapTransit(inp) if inp else inp, + return wrapTransit( + unwrapTransit(inp) if inp else inp, {"match": -1, "value": value}, ) @@ -339,7 +339,7 @@ class FlowExecutor: for portIdx, (srcId, srcOut) in inputSources.items(): out = nodeOutputs.get(srcId) if out is not None: - inputs[portIdx] = _unwrapTransit(out) + inputs[portIdx] = unwrapTransit(out) first = None merged: Dict = {} diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py index 1f01a57d..1d2aeb13 100644 --- a/modules/workflows/automation2/graphUtils.py +++ b/modules/workflows/automation2/graphUtils.py @@ -131,9 +131,9 @@ def parse_graph_defined_schema(node: Dict[str, Any], parameter_key: str) -> Opti Build a JSON-serializable port schema dict from graph parameters (e.g. form ``fields``). Used by tooling and future API surfaces; mirrors ``parse_graph_defined_output_schema`` logic. """ - from modules.features.graphicalEditor.portTypes import _derive_form_payload_schema_from_param + from modules.features.graphicalEditor.portTypes import deriveFormPayloadSchemaFromParam - sch = _derive_form_payload_schema_from_param(node, parameter_key) + sch = deriveFormPayloadSchemaFromParam(node, parameter_key) if sch is None: return None return { @@ -371,8 +371,8 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: return resolveParameterReferences(inner, nodeOutputs) if value.get("type") == "system": variable = value.get("variable", "") - from modules.features.graphicalEditor.portTypes import _resolveSystemVariable - return _resolveSystemVariable(variable, nodeOutputs.get("_context", {})) + from modules.features.graphicalEditor.portTypes import resolveSystemVariable + return resolveSystemVariable(variable, nodeOutputs.get("_context", {})) return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()} if isinstance(value, str): diff --git a/modules/workflows/scheduler/mainScheduler.py b/modules/workflows/scheduler/mainScheduler.py index 651a968d..bf2cd0fd 100644 --- a/modules/workflows/scheduler/mainScheduler.py +++ b/modules/workflows/scheduler/mainScheduler.py @@ -332,7 +332,7 @@ def _cronToIntervalSeconds(cron: str): return None -def _notifyRunFailed(workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None) -> None: +def notifyRunFailed(workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None) -> None: """Notify on workflow run failure: emit event, create in-app notification, trigger email subscription.""" try: eventManager.emit("graphicalEditor.run.failed", { diff --git a/tests/demo/conftest.py b/tests/demo/conftest.py index 79bf452b..43d8363e 100644 --- a/tests/demo/conftest.py +++ b/tests/demo/conftest.py @@ -21,8 +21,8 @@ def db(): @pytest.fixture(scope="session") def demoConfig(): """The investor demo config instance.""" - from modules.demoConfigs import _getDemoConfigByCode - cfg = _getDemoConfigByCode("investor-demo-2026") + from modules.demoConfigs import getDemoConfigByCode + cfg = getDemoConfigByCode("investor-demo-2026") assert cfg is not None, "Demo config 'investor-demo-2026' not found — check modules/demoConfigs/" return cfg diff --git a/tests/demo/test_demo_api.py b/tests/demo/test_demo_api.py index edb31086..1973d110 100644 --- a/tests/demo/test_demo_api.py +++ b/tests/demo/test_demo_api.py @@ -15,25 +15,25 @@ class TestDemoConfigDiscovery: """Test the auto-discovery module (no HTTP needed).""" def test_discoveryFindsInvestorConfig(self): - from modules.demoConfigs import _getAvailableDemoConfigs - configs = _getAvailableDemoConfigs() + from modules.demoConfigs import getAvailableDemoConfigs + configs = getAvailableDemoConfigs() assert "investor-demo-2026" in configs, f"Available configs: {list(configs.keys())}" def test_getByCodeReturnsInstance(self): - from modules.demoConfigs import _getDemoConfigByCode - cfg = _getDemoConfigByCode("investor-demo-2026") + from modules.demoConfigs import getDemoConfigByCode + cfg = getDemoConfigByCode("investor-demo-2026") assert cfg is not None assert cfg.code == "investor-demo-2026" assert cfg.label == "Investor Demo April 2026" def test_getByCodeReturnsNoneForUnknown(self): - from modules.demoConfigs import _getDemoConfigByCode - cfg = _getDemoConfigByCode("nonexistent-config") + from modules.demoConfigs import getDemoConfigByCode + cfg = getDemoConfigByCode("nonexistent-config") assert cfg is None def test_toDictHasRequiredFields(self): - from modules.demoConfigs import _getDemoConfigByCode - cfg = _getDemoConfigByCode("investor-demo-2026") + from modules.demoConfigs import getDemoConfigByCode + cfg = getDemoConfigByCode("investor-demo-2026") d = cfg.toDict() assert "code" in d assert "label" in d diff --git a/tests/demo/test_pwg_demo_bootstrap.py b/tests/demo/test_pwg_demo_bootstrap.py index 0613cafa..94c890e4 100644 --- a/tests/demo/test_pwg_demo_bootstrap.py +++ b/tests/demo/test_pwg_demo_bootstrap.py @@ -39,8 +39,8 @@ pytestmark = [pytest.mark.expensive, pytest.mark.live] @pytest.fixture(scope="session") def pwgDemoConfig(): """Auto-discovered ``PwgDemo2026`` instance.""" - from modules.demoConfigs import _getDemoConfigByCode - cfg = _getDemoConfigByCode("pwg-demo-2026") + from modules.demoConfigs import getDemoConfigByCode + cfg = getDemoConfigByCode("pwg-demo-2026") assert cfg is not None, ( "Demo config 'pwg-demo-2026' not found — check modules/demoConfigs/pwgDemo2026.py" ) diff --git a/tests/test_service_redmine_stats_cache.py b/tests/test_service_redmine_stats_cache.py index 35a76390..47d98a9d 100644 --- a/tests/test_service_redmine_stats_cache.py +++ b/tests/test_service_redmine_stats_cache.py @@ -12,7 +12,7 @@ import time from modules.features.redmine.serviceRedmineStatsCache import ( RedmineStatsCache, - _getStatsCache, + getStatsCache, ) @@ -52,6 +52,6 @@ class TestRedmineStatsCache: assert c.get(c.buildKey("inst-b", None, None, "week", [])) == "v3" def test_singletonIsStable(self) -> None: - a = _getStatsCache() - b = _getStatsCache() + a = getStatsCache() + b = getStatsCache() assert a is b diff --git a/tests/unit/datamodels/test_udm_bridge.py b/tests/unit/datamodels/test_udm_bridge.py index 64cda51a..db52ffe6 100644 --- a/tests/unit/datamodels/test_udm_bridge.py +++ b/tests/unit/datamodels/test_udm_bridge.py @@ -1,7 +1,7 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart -from modules.datamodels.datamodelUdm import _contentPartsToUdm, _udmToContentParts +from modules.datamodels.datamodelUdm import contentPartsToUdm, _udmToContentParts def test_bridge_pdf_like_pages(): @@ -36,7 +36,7 @@ def test_bridge_pdf_like_pages(): ), ] extracted = ContentExtracted(id="ext1", parts=parts) - udm = _contentPartsToUdm(extracted, "pdf", "a.pdf") + udm = contentPartsToUdm(extracted, "pdf", "a.pdf") assert udm.sourceType == "pdf" assert len(udm.children) == 2 assert all(n.role == "page" for n in udm.children) @@ -45,7 +45,7 @@ def test_bridge_pdf_like_pages(): def test_udm_to_parts_roundtrip_preserves_ids(): - udm = _contentPartsToUdm( + udm = contentPartsToUdm( ContentExtracted( id="e1", parts=[ diff --git a/tests/unit/features/test_trustee_template_workflows.py b/tests/unit/features/test_trustee_template_workflows.py new file mode 100644 index 00000000..388f2d29 --- /dev/null +++ b/tests/unit/features/test_trustee_template_workflows.py @@ -0,0 +1,59 @@ +# Copyright (c) 2025 Patrick Motsch +"""Guardrails for Trustee ``getTemplateWorkflows`` graphs (new instance bootstrap).""" +from __future__ import annotations + +import json + +from modules.features.trustee.mainTrustee import getTemplateWorkflows + + +def _receiptTemplateGraph(): + templates = getTemplateWorkflows() or [] + t = next((w for w in templates if w.get("id") == "trustee-receipt-import"), None) + assert t is not None, "template trustee-receipt-import must exist" + return t.get("graph") or {} + + +def _materializeInstance(graph: dict, instanceId: str) -> dict: + raw = json.dumps(graph) + raw = raw.replace("{{featureInstanceId}}", instanceId) + return json.loads(raw) + + +class TestTrusteeTemplateReceiptImport: + """The receipt-import chain must use explicit DataRefs (Pick-not-Push). + + Empty ``documentList: []`` is not auto-wired by ``materializeConnectionRefs`` + (that helper only materializes empty ``userConnection`` references). + """ + + def test_receiptImportWiresDocumentListRefs(self): + g = _receiptTemplateGraph() + inst = "00000000-0000-0000-0000-000000000001" + g = _materializeInstance(g, inst) + + byId = {n["id"]: n for n in g.get("nodes", []) if isinstance(n, dict) and n.get("id")} + + proc = byId.get("process") + sync = byId.get("sync") + assert proc and sync + + dlp = (proc.get("parameters") or {}).get("documentList") + dls = (sync.get("parameters") or {}).get("documentList") + + assert isinstance(dlp, dict) and dlp.get("type") == "ref" + assert dlp.get("nodeId") == "extract" + assert dlp.get("path") == ["documents"] + + assert isinstance(dls, dict) and dls.get("type") == "ref" + assert dls.get("nodeId") == "process" + assert dls.get("path") == ["documents"] + + def test_receiptImportFeatureInstanceIdSubstituted(self): + g = _receiptTemplateGraph() + inst = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + g = _materializeInstance(g, inst) + for n in g.get("nodes", []): + p = n.get("parameters") or {} + if "featureInstanceId" in p: + assert p["featureInstanceId"] == inst diff --git a/tests/unit/features/trustee/test_accountingConnectorAbacus_balances.py b/tests/unit/features/trustee/test_accountingConnectorAbacus_balances.py new file mode 100644 index 00000000..ad84e171 --- /dev/null +++ b/tests/unit/features/trustee/test_accountingConnectorAbacus_balances.py @@ -0,0 +1,94 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""Unit tests for the Abacus connector's getAccountBalances aggregation logic.""" + +from unittest.mock import patch + +import pytest + +from modules.features.trustee.accounting.connectors.accountingConnectorAbacus import ( + AccountingConnectorAbacus, + _isIncomeStatementAccount, +) + + +class TestIsIncomeStatementAccount: + @pytest.mark.parametrize("accno,expected", [ + ("1020", False), ("2010", False), ("3000", True), ("8500", True), + ]) + def test_classification(self, accno, expected): + assert _isIncomeStatementAccount(accno) == expected + + +class TestAbacusGetAccountBalances: + @pytest.mark.asyncio + async def test_aggregatesFromGeneralJournalEntries(self): + connector = AccountingConnectorAbacus() + + rawEntries = [ + { + "Id": "e1", "JournalDate": "2025-01-15T00:00:00", + "Lines": [ + {"AccountId": "1020", "DebitAmount": 1000.0, "CreditAmount": 0.0}, + {"AccountId": "6000", "DebitAmount": 0.0, "CreditAmount": 1000.0}, + ], + }, + { + "Id": "e2", "JournalDate": "2025-12-20T00:00:00", + "Lines": [ + {"AccountId": "1020", "DebitAmount": 500.0, "CreditAmount": 0.0}, + {"AccountId": "6000", "DebitAmount": 0.0, "CreditAmount": 500.0}, + ], + }, + ] + + async def _fakeAuth(self, config): + return {"Authorization": "Bearer X"} + + async def _fakeFetch(self, config, headers, dateTo): + return rawEntries + + with patch.object(AccountingConnectorAbacus, "_buildAuthHeaders", _fakeAuth), \ + patch.object(AccountingConnectorAbacus, "_fetchAllJournalEntries", _fakeFetch): + balances = await connector.getAccountBalances({"apiBaseUrl": "http://x", "clientName": "y"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodYear, b.periodMonth): b for b in balances} + + assert byPeriod[("1020", 2025, 1)].closingBalance == 1000.0 + assert byPeriod[("1020", 2025, 12)].closingBalance == 1500.0 + assert byPeriod[("1020", 2025, 0)].closingBalance == 1500.0 + # 6000 is income statement (3xxx-9xxx) but credit-side -- closing = -1500 (net debit-credit) + assert byPeriod[("6000", 2025, 0)].closingBalance == -1500.0 + # ER account January opening must be 0 (no prior-year carry) + assert byPeriod[("6000", 2025, 1)].openingBalance == 0.0 + + @pytest.mark.asyncio + async def test_balanceSheetCarryOverAcrossYears(self): + connector = AccountingConnectorAbacus() + + rawEntries = [ + { + "Id": "e1", "JournalDate": "2024-06-30T00:00:00", + "Lines": [ + {"AccountId": "1020", "DebitAmount": 7000.0, "CreditAmount": 0.0}, + {"AccountId": "9999", "DebitAmount": 0.0, "CreditAmount": 7000.0}, + ], + }, + ] + + async def _fakeAuth(self, config): + return {"Authorization": "Bearer X"} + + async def _fakeFetch(self, config, headers, dateTo): + return rawEntries + + with patch.object(AccountingConnectorAbacus, "_buildAuthHeaders", _fakeAuth), \ + patch.object(AccountingConnectorAbacus, "_fetchAllJournalEntries", _fakeFetch): + balances = await connector.getAccountBalances({"apiBaseUrl": "http://x", "clientName": "y"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodYear, b.periodMonth): b for b in balances} + + # 1020 (BS) opens 2025 with 7000 from prior year, no movements in 2025 -> closes at 7000 + assert byPeriod[("1020", 2025, 1)].openingBalance == 7000.0 + assert byPeriod[("1020", 2025, 0)].openingBalance == 7000.0 + assert byPeriod[("1020", 2025, 0)].closingBalance == 7000.0 diff --git a/tests/unit/features/trustee/test_accountingConnectorBexio_balances.py b/tests/unit/features/trustee/test_accountingConnectorBexio_balances.py new file mode 100644 index 00000000..945c7c95 --- /dev/null +++ b/tests/unit/features/trustee/test_accountingConnectorBexio_balances.py @@ -0,0 +1,114 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""Unit tests for the Bexio connector's getAccountBalances aggregation logic.""" + +from unittest.mock import patch + +import pytest + +from modules.features.trustee.accounting.connectors.accountingConnectorBexio import ( + AccountingConnectorBexio, + _isIncomeStatementAccount, +) + + +class TestIsIncomeStatementAccount: + @pytest.mark.parametrize("accno,expected", [ + ("1020", False), ("2010", False), ("3000", True), ("9999", True), ("", False), + ]) + def test_classification(self, accno, expected): + assert _isIncomeStatementAccount(accno) == expected + + +class TestBexioGetAccountBalances: + @pytest.mark.asyncio + async def test_aggregatesBalanceSheetAccount_cumulativeAcrossMonths(self): + connector = AccountingConnectorBexio() + + accounts = [{"id": 100, "account_no": "1020"}, {"id": 200, "account_no": "6000"}] + + # Simulate a clean year for account 1020 (BS): +1000 in Jan, -300 in Feb, +500 in Dec + rawJournal = [ + {"date": "2025-01-15", "amount": 1000.0, "debit_account_id": 100, "credit_account_id": 200}, + {"date": "2025-02-10", "amount": 300.0, "debit_account_id": 200, "credit_account_id": 100}, + {"date": "2025-12-20", "amount": 500.0, "debit_account_id": 100, "credit_account_id": 200}, + ] + + async def _fakeAccounts(self, config): + return accounts + + async def _fakeJournal(self, config, dateTo): + return rawJournal + + with patch.object(AccountingConnectorBexio, "_loadRawAccounts", _fakeAccounts), \ + patch.object(AccountingConnectorBexio, "_fetchAllJournalRows", _fakeJournal): + balances = await connector.getAccountBalances({"accessToken": "x", "apiBaseUrl": "http://x"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodMonth): b for b in balances if b.periodYear == 2025} + + # Account 1020 (BS) cumulative: Jan +1000, Feb +1000-300=700, Dec +700+500=1200 + assert byPeriod[("1020", 1)].closingBalance == 1000.0 + assert byPeriod[("1020", 2)].closingBalance == 700.0 + assert byPeriod[("1020", 11)].closingBalance == 700.0 + assert byPeriod[("1020", 12)].closingBalance == 1200.0 + assert byPeriod[("1020", 0)].closingBalance == 1200.0 # annual + assert byPeriod[("1020", 0)].openingBalance == 0.0 + assert byPeriod[("1020", 1)].openingBalance == 0.0 + assert byPeriod[("1020", 2)].openingBalance == 1000.0 # = previous month's closing + + @pytest.mark.asyncio + async def test_balanceSheetAccount_carriesPriorYearOpening(self): + connector = AccountingConnectorBexio() + accounts = [{"id": 100, "account_no": "1020"}, {"id": 200, "account_no": "6000"}] + + rawJournal = [ + {"date": "2024-06-01", "amount": 5000.0, "debit_account_id": 100, "credit_account_id": 200}, + {"date": "2025-03-15", "amount": 1000.0, "debit_account_id": 100, "credit_account_id": 200}, + ] + + async def _fakeAccounts(self, config): + return accounts + + async def _fakeJournal(self, config, dateTo): + return rawJournal + + with patch.object(AccountingConnectorBexio, "_loadRawAccounts", _fakeAccounts), \ + patch.object(AccountingConnectorBexio, "_fetchAllJournalRows", _fakeJournal): + balances = await connector.getAccountBalances({"accessToken": "x", "apiBaseUrl": "http://x"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodMonth): b for b in balances if b.periodYear == 2025} + + # 2025 opening for 1020 = 5000 (carried over from 2024) + assert byPeriod[("1020", 1)].openingBalance == 5000.0 + assert byPeriod[("1020", 0)].openingBalance == 5000.0 + assert byPeriod[("1020", 12)].closingBalance == 6000.0 + assert byPeriod[("1020", 0)].closingBalance == 6000.0 + + @pytest.mark.asyncio + async def test_incomeStatementAccount_resetsToZeroEachYear(self): + connector = AccountingConnectorBexio() + accounts = [{"id": 200, "account_no": "6000"}, {"id": 300, "account_no": "1020"}] + + rawJournal = [ + {"date": "2024-12-31", "amount": 99999.99, "debit_account_id": 200, "credit_account_id": 300}, + {"date": "2025-06-15", "amount": 250.0, "debit_account_id": 200, "credit_account_id": 300}, + ] + + async def _fakeAccounts(self, config): + return accounts + + async def _fakeJournal(self, config, dateTo): + return rawJournal + + with patch.object(AccountingConnectorBexio, "_loadRawAccounts", _fakeAccounts), \ + patch.object(AccountingConnectorBexio, "_fetchAllJournalRows", _fakeJournal): + balances = await connector.getAccountBalances({"accessToken": "x", "apiBaseUrl": "http://x"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodMonth): b for b in balances if b.periodYear == 2025} + + # ER account 6000: prior year had 99999.99 movement; 2025 opening MUST be 0 + assert byPeriod[("6000", 1)].openingBalance == 0.0 + assert byPeriod[("6000", 0)].openingBalance == 0.0 + assert byPeriod[("6000", 6)].closingBalance == 250.0 + assert byPeriod[("6000", 12)].closingBalance == 250.0 + assert byPeriod[("6000", 0)].closingBalance == 250.0 diff --git a/tests/unit/features/trustee/test_accountingConnectorRma_balances.py b/tests/unit/features/trustee/test_accountingConnectorRma_balances.py new file mode 100644 index 00000000..b6e43717 --- /dev/null +++ b/tests/unit/features/trustee/test_accountingConnectorRma_balances.py @@ -0,0 +1,156 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""Unit tests for the RMA connector's getAccountBalances implementation. + +Mocks the `_fetchSaldoRows` low-level call so we exercise the orchestration +logic (period iteration, ER/BS handling, opening/closing carry-over) without +hitting the real RMA HTTP API. +""" + +import json +from typing import Dict +from unittest.mock import patch + +import pytest + +from modules.features.trustee.accounting.connectors.accountingConnectorRma import ( + AccountingConnectorRma, + _formatLastDayOfMonth, + _isIncomeStatementAccount, + _parseSaldoBody, +) + + +class TestParseSaldoBody: + def test_jsonRowsParsed(self): + body = json.dumps({ + "row": [ + {"column": ["1020", "Bank UBS", "48507.4100"]}, + {"column": ["6000", "Personalaufwand", "12000.00"]}, + ] + }) + rows = _parseSaldoBody(body) + assert ("1020", 48507.41) in rows + assert ("6000", 12000.0) in rows + + def test_xmlRowsParsed(self): + body = ( + "
" + "1020Bank48507.41" + "2010AHV-1234.50" + "
" + ) + rows = _parseSaldoBody(body) + assert ("1020", 48507.41) in rows + assert ("2010", -1234.5) in rows + + def test_emptyAndMalformedReturnEmpty(self): + assert _parseSaldoBody("") == [] + assert _parseSaldoBody("not even json or xml") == [] + assert _parseSaldoBody('{"row": []}') == [] + + +class TestIsIncomeStatementAccount: + @pytest.mark.parametrize("accno,expected", [ + ("1020", False), + ("2010", False), + ("2800", False), + ("3200", True), + ("6000", True), + ("9100", True), + ("", False), + ("ABC", False), + ]) + def test_classification(self, accno, expected): + assert _isIncomeStatementAccount(accno) == expected + + +class TestFormatLastDayOfMonth: + def test_january(self): + assert _formatLastDayOfMonth(2025, 1) == "2025-01-31" + + def test_february_nonLeap(self): + assert _formatLastDayOfMonth(2025, 2) == "2025-02-28" + + def test_february_leap(self): + assert _formatLastDayOfMonth(2024, 2) == "2024-02-29" + + def test_december(self): + assert _formatLastDayOfMonth(2025, 12) == "2025-12-31" + + +class TestRmaGetAccountBalances: + """Reproduces the BuHa SoHa scenario: account 1020 closing balance per + 31.12.2025 = 48'507.41, with prior-year opening 30'927.62. + """ + + @pytest.mark.asyncio + async def test_buhaSohaScenario_yieldsAuthoritativeBalances(self): + connector = AccountingConnectorRma() + + priorYearEndSaldo = 30927.62 + decemberSaldo = 48507.41 + # Simplified monthly progression: linear ramp from 31000 -> 48507.41 + monthlySaldos = { + 1: 31200.00, 2: 32500.00, 3: 33800.00, 4: 35200.00, + 5: 36800.00, 6: 38500.00, 7: 40100.00, 8: 41900.00, + 9: 43800.00, 10: 45500.00, 11: 47100.00, 12: decemberSaldo, + } + + async def _fakeFetchRows(self, config, accno, fromDate, toDate): + if toDate == "2024-12-31": + return [("1020", priorYearEndSaldo)] + if toDate.startswith("2025-"): + month = int(toDate[5:7]) + return [("1020", monthlySaldos[month])] + return [] + + async def _fakeChart(self, config, accountType=None): + return [type("AC", (), {"accountNumber": "1020"})()] + + with patch.object(AccountingConnectorRma, "_fetchSaldoRows", _fakeFetchRows), \ + patch.object(AccountingConnectorRma, "getChartOfAccounts", _fakeChart): + balances = await connector.getAccountBalances({"clientName": "test", "apiBaseUrl": "http://x", "apiKey": "k"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodYear, b.periodMonth): b for b in balances} + + annual = byPeriod[("1020", 2025, 0)] + assert annual.openingBalance == round(priorYearEndSaldo, 2) + assert annual.closingBalance == round(decemberSaldo, 2) + + dec = byPeriod[("1020", 2025, 12)] + assert dec.closingBalance == round(decemberSaldo, 2) + assert dec.openingBalance == round(monthlySaldos[11], 2) + + nov = byPeriod[("1020", 2025, 11)] + assert nov.closingBalance == round(monthlySaldos[11], 2) + + jan = byPeriod[("1020", 2025, 1)] + assert jan.openingBalance == round(priorYearEndSaldo, 2) + assert jan.closingBalance == round(monthlySaldos[1], 2) + + @pytest.mark.asyncio + async def test_incomeStatementAccountResetsToZero(self): + connector = AccountingConnectorRma() + + async def _fakeFetchRows(self, config, accno, fromDate, toDate): + if toDate == "2024-12-31": + return [("6000", 99999.99)] + if toDate == "2025-01-31": + return [("6000", 5000.00)] + if toDate == "2025-12-31": + return [("6000", 60000.00)] + return [] + + async def _fakeChart(self, config, accountType=None): + return [type("AC", (), {"accountNumber": "6000"})()] + + with patch.object(AccountingConnectorRma, "_fetchSaldoRows", _fakeFetchRows), \ + patch.object(AccountingConnectorRma, "getChartOfAccounts", _fakeChart): + balances = await connector.getAccountBalances({"clientName": "x", "apiBaseUrl": "http://x", "apiKey": "k"}, years=[2025]) + + byPeriod = {(b.accountNumber, b.periodMonth): b for b in balances if b.periodYear == 2025} + + # ER account January opening MUST be 0 (not 99999.99 from prior year) + assert byPeriod[("6000", 1)].openingBalance == 0.0 + assert byPeriod[("6000", 0)].openingBalance == 0.0 # annual bucket too diff --git a/tests/unit/features/trustee/test_accountingDataSync_balances.py b/tests/unit/features/trustee/test_accountingDataSync_balances.py new file mode 100644 index 00000000..517318c9 --- /dev/null +++ b/tests/unit/features/trustee/test_accountingDataSync_balances.py @@ -0,0 +1,196 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +"""Unit tests for the local-fallback cumulative balance computation in +``AccountingDataSync._buildLocalBalanceFallback`` and the connector handoff +in ``_persistBalances``. + +These tests exercise pure-logic paths -- no DB, no HTTP. We pass a +``FakeInterface`` that records the bulk-create rows so we can inspect what +would have been written to ``TrusteeDataAccountBalance``. +""" + +from typing import Any, Dict, List, Type +from unittest.mock import MagicMock + +import pytest + +from modules.features.trustee.accounting.accountingConnectorBase import AccountingPeriodBalance +from modules.features.trustee.accounting.accountingDataSync import ( + AccountingDataSync, + _isIncomeStatementAccount, + _resolveBalanceYears, +) + + +class _FakeDb: + """Minimal db stub: records the rows handed to ``recordCreateBulk`` and + returns canned recordsets for ``getRecordset``.""" + + def __init__(self, entries: List[Dict[str, Any]], lines: List[Dict[str, Any]]): + self._entries = entries + self._lines = lines + self.createdRows: List[Dict[str, Any]] = [] + + def getRecordset(self, model, recordFilter=None): + name = model.__name__ + if "Entry" in name and "Line" not in name: + return list(self._entries) + if "Line" in name: + return list(self._lines) + return [] + + def recordDeleteWhere(self, model, where): + return 0 + + def recordCreateBulk(self, model, rows): + self.createdRows.extend(rows) + return len(rows) + + def recordModify(self, model, recordId, payload): + return None + + def recordCreate(self, model, row): + self.createdRows.append(row) + return row.get("id", "x") + + def recordDelete(self, model, rid): + return 1 + + +class _FakeInterface: + def __init__(self, db): + self.db = db + + +class _FakeJournalEntry: + pass + + +class _FakeJournalLine: + pass + + +class _FakeBalance: + pass + + +class TestResolveBalanceYears: + def test_singleYearWindow(self): + assert _resolveBalanceYears("2025-01-01", "2025-12-31", None, None) == [2025] + + def test_multiYearWindow(self): + assert _resolveBalanceYears("2024-01-01", "2026-12-31", None, None) == [2024, 2025, 2026] + + def test_fallsBackToObservedDates(self): + assert _resolveBalanceYears(None, None, "2024-03-15", "2025-08-01") == [2024, 2025] + + def test_invertedWindowIsCorrected(self): + assert _resolveBalanceYears("2026-12-31", "2024-01-01", None, None) == [2024, 2025, 2026] + + +class TestIsIncomeStatementAccount: + @pytest.mark.parametrize("accno,expected", [ + ("1020", False), ("2010", False), ("3000", True), ("8500", True), + ]) + def test_classification(self, accno, expected): + assert _isIncomeStatementAccount(accno) == expected + + +class TestPersistBalancesConnectorPath: + def test_connectorOutputPersistedVerbatim(self): + db = _FakeDb([], []) + sync = AccountingDataSync(_FakeInterface(db)) + + connectorRows = [ + AccountingPeriodBalance( + accountNumber="1020", periodYear=2025, periodMonth=12, + openingBalance=47100.00, debitTotal=159374.89, creditTotal=79939.86, + closingBalance=48507.41, currency="CHF", + ), + ] + + n = sync._persistBalances( + "fi-1", "m-1", + _FakeJournalEntry, _FakeJournalLine, _FakeBalance, + connectorRows, "connector", + ) + + assert n == 1 + row = db.createdRows[0] + assert row["accountNumber"] == "1020" + assert row["closingBalance"] == 48507.41 + assert row["openingBalance"] == 47100.00 + assert row["featureInstanceId"] == "fi-1" + assert row["mandateId"] == "m-1" + + +class TestLocalFallbackCumulative: + """Replicates the BuHa SoHa scenario WITHOUT prior-year journal data: + the local fallback can't recreate the prior-year carry-over (by design), + but the cumulation across months within the imported window must be + correct -- previously the closingBalance was just the per-period net + movement. + """ + + def test_balanceSheetAccount_cumulatesAcrossMonths(self): + entries = [ + {"id": "e1", "bookingDate": "2025-01-15"}, + {"id": "e2", "bookingDate": "2025-02-10"}, + {"id": "e3", "bookingDate": "2025-12-20"}, + ] + lines = [ + {"journalEntryId": "e1", "accountNumber": "1020", "debitAmount": 1000.0, "creditAmount": 0.0}, + {"journalEntryId": "e2", "accountNumber": "1020", "debitAmount": 0.0, "creditAmount": 300.0}, + {"journalEntryId": "e3", "accountNumber": "1020", "debitAmount": 500.0, "creditAmount": 0.0}, + ] + db = _FakeDb(entries, lines) + sync = AccountingDataSync(_FakeInterface(db)) + + n = sync._persistBalances( + "fi-1", "m-1", + _FakeJournalEntry, _FakeJournalLine, _FakeBalance, + [], "local-fallback", + ) + + assert n > 0 + byPeriod = {(r["accountNumber"], r["periodYear"], r["periodMonth"]): r for r in db.createdRows} + assert byPeriod[("1020", 2025, 1)]["closingBalance"] == 1000.0 + assert byPeriod[("1020", 2025, 2)]["closingBalance"] == 700.0 + # December: previous closing (700) + 500 - 0 = 1200 + assert byPeriod[("1020", 2025, 12)]["closingBalance"] == 1200.0 + assert byPeriod[("1020", 2025, 0)]["closingBalance"] == 1200.0 + assert byPeriod[("1020", 2025, 2)]["openingBalance"] == 1000.0 + + def test_incomeStatementAccount_resetsAtFiscalYearStart(self): + entries = [ + {"id": "e1", "bookingDate": "2024-12-31"}, + {"id": "e2", "bookingDate": "2025-06-15"}, + {"id": "e3", "bookingDate": "2025-07-10"}, + ] + lines = [ + {"journalEntryId": "e1", "accountNumber": "6000", "debitAmount": 99999.99, "creditAmount": 0.0}, + {"journalEntryId": "e2", "accountNumber": "6000", "debitAmount": 250.0, "creditAmount": 0.0}, + {"journalEntryId": "e3", "accountNumber": "6000", "debitAmount": 100.0, "creditAmount": 0.0}, + ] + db = _FakeDb(entries, lines) + sync = AccountingDataSync(_FakeInterface(db)) + + sync._persistBalances( + "fi-1", "m-1", + _FakeJournalEntry, _FakeJournalLine, _FakeBalance, + [], "local-fallback", + ) + byPeriod = {(r["accountNumber"], r["periodYear"], r["periodMonth"]): r for r in db.createdRows} + # ER account 6000 must NOT carry 2024's 99999.99 into 2025 + # 2024 year summary closes at 99999.99, but 2025 opening must be 0 + assert byPeriod[("6000", 2024, 0)]["closingBalance"] == 99999.99 + assert byPeriod[("6000", 2025, 0)]["openingBalance"] == 0.0 + # June 2025: first activity in fiscal year, opening is the reset 0 + assert byPeriod[("6000", 2025, 6)]["openingBalance"] == 0.0 + assert byPeriod[("6000", 2025, 6)]["closingBalance"] == 250.0 + # July 2025: opening is June's closing (cumulation within same fiscal year) + assert byPeriod[("6000", 2025, 7)]["openingBalance"] == 250.0 + assert byPeriod[("6000", 2025, 7)]["closingBalance"] == 350.0 + # 2025 year summary: 250 + 100 = 350 + assert byPeriod[("6000", 2025, 0)]["closingBalance"] == 350.0 + assert byPeriod[("6000", 2025, 0)]["debitTotal"] == 350.0 diff --git a/tests/unit/graphicalEditor/test_featureInstanceRef_node_definitions.py b/tests/unit/graphicalEditor/test_featureInstanceRef_node_definitions.py new file mode 100644 index 00000000..279c6da4 --- /dev/null +++ b/tests/unit/graphicalEditor/test_featureInstanceRef_node_definitions.py @@ -0,0 +1,95 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +""" +Schicht-4 / Phase-5 follow-up: assert that all Trustee + Redmine node +definitions expose ``featureInstanceId`` as a typed +``FeatureInstanceRef[]`` parameter rendered by the dedicated +``featureInstance`` frontend renderer. + +Background +---------- +The legacy adapter shape used ``type: "string"`` + ``frontendType: "hidden"`` +for the mandate binding. That produced two visible defects: + - the Graph Editor banner reported ``Pflichtfeld ohne Quelle`` for an + invisible parameter (no UI surface, no resolution path), and + - the DataPicker could not type-filter compatible upstream candidates. + +The Typed Action Architecture ships a dedicated catalog type +(``FeatureInstanceRef``) and a discriminator notation +``FeatureInstanceRef[]``; this test guards the migration so +nobody silently re-introduces the legacy shape. See +``wiki/c-work/2-build/2026-04-feature-instance-ref-adapter-migration.md``. +""" +from __future__ import annotations + +import pytest + +from modules.features.graphicalEditor.nodeDefinitions.redmine import REDMINE_NODES +from modules.features.graphicalEditor.nodeDefinitions.trustee import TRUSTEE_NODES + + +def _featureInstanceParam(node: dict) -> dict | None: + for param in node.get("parameters", []): + if param.get("name") == "featureInstanceId": + return param + return None + + +@pytest.mark.parametrize("node", TRUSTEE_NODES, ids=lambda n: n["id"]) +def test_trusteeNodesUseTypedFeatureInstanceRef(node: dict) -> None: + """Every Trustee node must bind its mandate via the typed catalog ref.""" + param = _featureInstanceParam(node) + assert param is not None, f"{node['id']} is missing a featureInstanceId parameter" + assert param["type"] == "FeatureInstanceRef[trustee]", ( + f"{node['id']}.featureInstanceId.type must be 'FeatureInstanceRef[trustee]', " + f"got {param['type']!r}" + ) + assert param.get("frontendType") == "featureInstance", ( + f"{node['id']}.featureInstanceId.frontendType must be 'featureInstance', " + f"got {param.get('frontendType')!r}" + ) + assert param.get("required") is True + assert (param.get("frontendOptions") or {}).get("featureCode") == "trustee" + + +@pytest.mark.parametrize("node", REDMINE_NODES, ids=lambda n: n["id"]) +def test_redmineNodesUseTypedFeatureInstanceRef(node: dict) -> None: + """Every Redmine node must bind its mandate via the typed catalog ref.""" + param = _featureInstanceParam(node) + assert param is not None, f"{node['id']} is missing a featureInstanceId parameter" + assert param["type"] == "FeatureInstanceRef[redmine]", ( + f"{node['id']}.featureInstanceId.type must be 'FeatureInstanceRef[redmine]', " + f"got {param['type']!r}" + ) + assert param.get("frontendType") == "featureInstance", ( + f"{node['id']}.featureInstanceId.frontendType must be 'featureInstance', " + f"got {param.get('frontendType')!r}" + ) + assert param.get("required") is True + assert (param.get("frontendOptions") or {}).get("featureCode") == "redmine" + + +@pytest.mark.parametrize( + "nodes", + [TRUSTEE_NODES, REDMINE_NODES], + ids=["trustee", "redmine"], +) +def test_noLegacyHiddenStringFeatureInstanceParam(nodes: list[dict]) -> None: + """Regression guard: the legacy ``string + hidden`` shape must be gone. + + A hidden+required parameter produces a phantom error in the editor + banner (`findRequiredErrors` filters them out as a safety net, but the + correct fix is to remove them at the source). + """ + offenders = [] + for node in nodes: + param = _featureInstanceParam(node) + if param is None: + continue + legacyShape = param.get("type") == "string" and param.get("frontendType") == "hidden" + if legacyShape: + offenders.append(node["id"]) + assert offenders == [], ( + "These nodes still use the legacy 'string + hidden' featureInstanceId " + "shape; migrate them to FeatureInstanceRef[]: " + ", ".join(offenders) + ) diff --git a/tests/unit/graphicalEditor/test_node_adapter.py b/tests/unit/graphicalEditor/test_node_adapter.py index 7b24b01a..64915a17 100644 --- a/tests/unit/graphicalEditor/test_node_adapter.py +++ b/tests/unit/graphicalEditor/test_node_adapter.py @@ -21,7 +21,7 @@ from modules.features.graphicalEditor.nodeAdapter import ( NodeAdapter, UserParamMapping, _adapterFromLegacyNode, - _bindsActionFromLegacy, + bindsActionFromLegacy, _extractVisibleWhen, _isMethodBoundNode, _projectAllAdapters, @@ -88,10 +88,10 @@ class TestIsMethodBound: class TestBindsActionFromLegacy: def test_returnsCanonicalFqn(self): - assert _bindsActionFromLegacy(_legacyMethodNode()) == "trustee.processDocuments" + assert bindsActionFromLegacy(_legacyMethodNode()) == "trustee.processDocuments" def test_returnsNoneForPrimitive(self): - assert _bindsActionFromLegacy(_primitiveNode()) is None + assert bindsActionFromLegacy(_primitiveNode()) is None class TestUserParamFromLegacy: diff --git a/tests/unit/graphicalEditor/test_route_options_feature_instance.py b/tests/unit/graphicalEditor/test_route_options_feature_instance.py new file mode 100644 index 00000000..d626c135 --- /dev/null +++ b/tests/unit/graphicalEditor/test_route_options_feature_instance.py @@ -0,0 +1,66 @@ +# Copyright (c) 2026 Patrick Motsch +# All rights reserved. +""" +Smoke test for the new ``GET /options/feature.instance`` endpoint that backs +the frontend ``FeatureInstancePicker`` (Schicht-4 / Phase-5 follow-up). + +A heavyweight HTTP integration test would need the full FastAPI client + +DB fixtures; this lightweight test asserts at the router level that the +endpoint exists with the expected method, path, and required query +parameter, so a refactor that drops or renames it fails loudly. + +Track-doc: ``wiki/c-work/2-build/2026-04-feature-instance-ref-adapter-migration.md``. +""" +from __future__ import annotations + +import pytest + +from modules.features.graphicalEditor.routeFeatureGraphicalEditor import router + + +def _findRoute(path: str, method: str = "GET"): + for route in router.routes: + # FastAPI routes expose `path` and `methods` attributes. + if getattr(route, "path", None) == path and method in ( + getattr(route, "methods", set()) or set() + ): + return route + return None + + +_ROUTE_PATH = "/api/workflows/{instanceId}/options/feature.instance" + + +def test_optionsFeatureInstanceRouteIsRegistered() -> None: + """The picker endpoint must be available at the documented path.""" + route = _findRoute(_ROUTE_PATH, "GET") + assert route is not None, ( + f"GET {_ROUTE_PATH} is not registered on graphicalEditor router. " + "The FeatureInstancePicker will fail to load mandate-scoped instances." + ) + + +def test_optionsFeatureInstanceRouteRequiresFeatureCode() -> None: + """``featureCode`` must be a required query parameter (no default).""" + route = _findRoute(_ROUTE_PATH, "GET") + assert route is not None + endpoint = route.endpoint + sig = __import__("inspect").signature(endpoint) + featureCode = sig.parameters.get("featureCode") + assert featureCode is not None, "featureCode parameter missing" + # FastAPI's Query(...) sentinel produces a FieldInfo whose `is_required()` + # returns True; older variants encoded the same intent via + # `default is Ellipsis` or `default.default is Ellipsis`. Accept any of + # those so the test stays robust across FastAPI/Pydantic versions. + default = featureCode.default + isRequiredFn = getattr(default, "is_required", None) + isRequired = ( + (callable(isRequiredFn) and isRequiredFn()) + or default is ... + or getattr(default, "default", None) is ... + ) + assert isRequired, ( + "featureCode must be a required Query parameter; otherwise the picker " + "could ask for ALL feature instances of the mandate, which is not the " + "intent of /options/feature.instance." + ) From 8221a0da3e011cb14874b163eb4983d98397ed4c Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 26 Apr 2026 08:57:49 +0200 Subject: [PATCH 4/7] fixed user references --- modules/connectors/connectorDbPostgre.py | 21 --------------------- modules/routes/routeHelpers.py | 19 +++---------------- 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index f2e7758e..7c56c57d 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -121,27 +121,6 @@ def getModelFields(model_class) -> Dict[str, str]: return fields -def _get_fk_sort_meta(model_class) -> Dict[str, Dict[str, str]]: - """Map FK field name -> {model, labelField} from json_schema_extra (``fk_model`` + ``fk_label_field``). - - ``fk_model`` may be omitted if ``fk_target.table`` is set (table name = resolver / JOIN key). - """ - result: Dict[str, Dict[str, str]] = {} - for name, field_info in model_class.model_fields.items(): - extra = field_info.json_schema_extra - if not extra or not isinstance(extra, dict): - continue - fk_model = extra.get("fk_model") - tgt = extra.get("fk_target") - if not fk_model and isinstance(tgt, dict) and tgt.get("table"): - fk_model = tgt["table"] - label_field = extra.get("fk_label_field") - if fk_model and label_field: - result[name] = {"model": str(fk_model), "labelField": str(label_field)} - return result - - - def parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: str = "") -> None: """Parse record fields in-place: numeric typing, vector parsing, JSONB deserialization.""" import json as _json diff --git a/modules/routes/routeHelpers.py b/modules/routes/routeHelpers.py index 19bfdb8e..1a396d26 100644 --- a/modules/routes/routeHelpers.py +++ b/modules/routes/routeHelpers.py @@ -66,24 +66,12 @@ def resolveUserLabels(ids: List[str]) -> Dict[str, Optional[str]]: """Resolve user IDs to display names. Returns None for unresolvable.""" from modules.interfaces.interfaceDbApp import getRootInterface rootIface = getRootInterface() - from modules.datamodels.datamodelUam import User as _User + from modules.datamodels.datamodelUam import UserInDB as _UserInDB uniqueIds = list(set(ids)) users = rootIface.db.getRecordset( - _User, + _UserInDB, recordFilter={"id": uniqueIds}, ) - if not users and uniqueIds: - logger.warning( - "resolveUserLabels: query returned 0 users for %d ids (db=%s, table=%s). " - "Attempting full table scan...", - len(uniqueIds), getattr(rootIface.db, 'dbDatabase', '?'), _User.__name__, - ) - allUsers = rootIface.db.getRecordset(_User) - logger.warning( - "resolveUserLabels: full scan found %d users total. Looking for ids: %s", - len(allUsers or []), uniqueIds[:3], - ) - users = [u for u in (allUsers or []) if u.get("id") in set(uniqueIds)] result: Dict[str, Optional[str]] = {} found: Dict[str, dict] = {} for u in (users or []): @@ -92,9 +80,8 @@ def resolveUserLabels(ids: List[str]) -> Dict[str, Optional[str]]: for uid in ids: u = found.get(uid) if u: - result[uid] = u.get("username") or u.get("email") or None + result[uid] = u.get("displayName") or u.get("username") or u.get("email") or None else: - logger.warning("resolveUserLabels: user not found for id=%s", uid) result[uid] = None return result From 564a1200c686614da5a4cf6f72ced5720de505ca Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 26 Apr 2026 18:11:42 +0200 Subject: [PATCH 5/7] datamodel sctirc fk logic in one place --- app.py | 8 + modules/aicore/aicoreModelRegistry.py | 20 +- modules/aicore/aicorePluginAnthropic.py | 96 ++++++ modules/aicore/aicorePluginOpenai.py | 129 ++++++++ modules/connectors/connectorDbPostgre.py | 67 ++++- modules/datamodels/datamodelAi.py | 2 +- modules/datamodels/datamodelAiAudit.py | 8 +- modules/datamodels/datamodelAudit.py | 8 +- modules/datamodels/datamodelBackgroundJob.py | 24 +- modules/datamodels/datamodelBase.py | 8 +- modules/datamodels/datamodelBilling.py | 43 +-- modules/datamodels/datamodelChat.py | 32 +- modules/datamodels/datamodelContent.py | 2 +- modules/datamodels/datamodelDataSource.py | 16 +- .../datamodels/datamodelFeatureDataSource.py | 10 +- modules/datamodels/datamodelFeatures.py | 4 +- modules/datamodels/datamodelFileFolder.py | 6 +- modules/datamodels/datamodelFiles.py | 10 +- modules/datamodels/datamodelInvitation.py | 6 +- modules/datamodels/datamodelKnowledge.py | 20 +- modules/datamodels/datamodelMembership.py | 28 +- modules/datamodels/datamodelMessaging.py | 14 +- modules/datamodels/datamodelNotification.py | 2 +- modules/datamodels/datamodelRbac.py | 12 +- modules/datamodels/datamodelSecurity.py | 14 +- modules/datamodels/datamodelSubscription.py | 40 +-- modules/datamodels/datamodelUam.py | 10 +- modules/datamodels/datamodelUdm.py | 4 +- modules/datamodels/datamodelUtils.py | 4 +- modules/datamodels/datamodelViews.py | 199 +++++++++++++ modules/demoConfigs/pwgDemo2026.py | 5 +- .../features/chatbot/routeFeatureChatbot.py | 11 - .../features/commcoach/datamodelCommcoach.py | 18 +- .../commcoach/interfaceFeatureCommcoach.py | 10 +- .../commcoach/routeFeatureCommcoach.py | 8 +- .../features/commcoach/serviceCommcoach.py | 55 ++-- .../features/commcoach/serviceCommcoachAi.py | 11 +- .../serviceCommcoachContextRetrieval.py | 30 +- .../commcoach/serviceCommcoachExport.py | 18 +- .../datamodelFeatureGraphicalEditor.py | 50 ++-- .../datamodelFeatureNeutralizer.py | 20 +- .../realEstate/datamodelFeatureRealEstate.py | 14 +- .../realEstate/routeFeatureRealEstate.py | 36 +-- modules/features/redmine/datamodelRedmine.py | 30 +- .../features/teamsbot/datamodelTeamsbot.py | 12 +- .../teamsbot/interfaceFeatureTeamsbot.py | 12 +- modules/features/teamsbot/service.py | 103 +++---- .../trustee/accounting/accountingBridge.py | 6 +- .../trustee/accounting/accountingDataSync.py | 143 ++++++--- .../trustee/datamodelFeatureTrustee.py | 92 +++--- .../trustee/interfaceFeatureTrustee.py | 12 +- .../features/trustee/routeFeatureTrustee.py | 198 ++++++++----- .../workspace/datamodelFeatureWorkspace.py | 6 +- modules/interfaces/interfaceDbApp.py | 19 +- modules/interfaces/interfaceDbBilling.py | 25 +- modules/interfaces/interfaceDbSubscription.py | 4 +- modules/interfaces/interfaceRbac.py | 194 ++++++++---- modules/routes/routeAdminFeatures.py | 3 + modules/routes/routeAdminRbacRules.py | 33 +-- modules/routes/routeAudit.py | 48 +-- modules/routes/routeBilling.py | 28 +- modules/routes/routeDataConnections.py | 4 +- modules/routes/routeDataFiles.py | 16 +- modules/routes/routeDataMandates.py | 21 +- modules/routes/routeDataUsers.py | 11 +- modules/routes/routeHelpers.py | 74 ++++- modules/routes/routeInvitations.py | 23 +- modules/routes/routeStore.py | 6 +- modules/routes/routeSubscription.py | 6 +- modules/routes/routeSystem.py | 7 +- .../mainBackgroundJobService.py | 8 +- .../mainServiceSubscription.py | 22 +- .../serviceSubscription/stripeBootstrap.py | 278 ++++++++++-------- modules/shared/attributeUtils.py | 45 ++- modules/shared/fkRegistry.py | 29 ++ .../methodTrustee/actions/processDocuments.py | 35 ++- .../methodTrustee/actions/queryData.py | 33 ++- .../actions/refreshAccountingData.py | 33 ++- .../workflows/processing/modes/modeDynamic.py | 4 +- .../test_accountingDataSync_balances.py | 57 +++- 80 files changed, 1808 insertions(+), 1004 deletions(-) create mode 100644 modules/datamodels/datamodelViews.py diff --git a/app.py b/app.py index d4d0ba99..1f10ef4b 100644 --- a/app.py +++ b/app.py @@ -294,6 +294,14 @@ except Exception as e: async def lifespan(app: FastAPI): logger.info("Application is starting up") + # Validate FK metadata on all Pydantic models (fail-fast, no silent fallbacks) + from modules.shared.fkRegistry import validateFkTargets + fkErrors = validateFkTargets() + if fkErrors: + for err in fkErrors: + logger.error("FK metadata validation: %s", err) + raise SystemExit(f"FK metadata validation failed ({len(fkErrors)} error(s)) — fix datamodels before starting") + # AI connectors already pre-warmed at module-load via _eager_prewarm() in aicoreModelRegistry. # Bootstrap database if needed (creates initial users, mandates, roles, etc.) diff --git a/modules/aicore/aicoreModelRegistry.py b/modules/aicore/aicoreModelRegistry.py index 844922a2..f05745ac 100644 --- a/modules/aicore/aicoreModelRegistry.py +++ b/modules/aicore/aicoreModelRegistry.py @@ -9,6 +9,7 @@ import logging import importlib import os import time +import threading from typing import Dict, List, Optional, Any, Tuple from modules.datamodels.datamodelAi import AiModel from .aicoreBase import BaseConnectorAi @@ -31,6 +32,7 @@ class ModelRegistry: self._connectors: Dict[str, BaseConnectorAi] = {} self._lastRefresh: Optional[float] = None self._refreshInterval: float = 300.0 # 5 minutes + self._refreshLock = threading.Lock() self._connectorsInitialized: bool = False self._discoveredConnectorsCache: Optional[List[BaseConnectorAi]] = None # Avoid re-instantiating on every discoverConnectors() call self._getAvailableModelsCache: Dict[Tuple[str, int], Tuple[List[AiModel], float]] = {} # (user_id, rbac_id) -> (models, ts) @@ -47,26 +49,10 @@ class ModelRegistry: self._connectors[connectorType] = connector - # Collect models from this connector try: models = connector.getCachedModels() for model in models: - # Validate displayName uniqueness - if model.displayName in self._models: - existingModel = self._models[model.displayName] - errorMsg = f"Duplicate displayName '{model.displayName}' detected! Existing model: displayName='{existingModel.displayName}', name='{existingModel.name}' (connector: {existingModel.connectorType}), New model: displayName='{model.displayName}', name='{model.name}' (connector: {connectorType}). displayName must be unique." - logger.error(errorMsg) - raise ValueError(errorMsg) - - # TODO TESTING: Override maxTokens if testing override is enabled - if TESTING_MAX_TOKENS_OVERRIDE is not None and model.maxTokens > TESTING_MAX_TOKENS_OVERRIDE: - originalMaxTokens = model.maxTokens - model.maxTokens = TESTING_MAX_TOKENS_OVERRIDE - logger.debug(f"TESTING: Overrode maxTokens for {model.displayName}: {originalMaxTokens} -> {TESTING_MAX_TOKENS_OVERRIDE}") - - # Use displayName as the key (must be unique) - self._models[model.displayName] = model - logger.debug(f"Registered model: {model.displayName} (name: {model.name}) from {connectorType}") + self._addModel(model, connectorType) except Exception as e: logger.error(f"Failed to register models from {connectorType}: {e}") raise diff --git a/modules/aicore/aicorePluginAnthropic.py b/modules/aicore/aicorePluginAnthropic.py index 12cfcbe7..1119f115 100644 --- a/modules/aicore/aicorePluginAnthropic.py +++ b/modules/aicore/aicorePluginAnthropic.py @@ -49,6 +49,102 @@ class AiAnthropic(BaseConnectorAi): def getModels(self) -> List[AiModel]: # Get all available Anthropic models. return [ + AiModel( + name="claude-opus-4-7", + displayName="Anthropic Claude Opus 4.7", + connectorType="anthropic", + apiUrl="https://api.anthropic.com/v1/messages", + temperature=0.2, + maxTokens=128000, + contextLength=1000000, + costPer1kTokensInput=0.005, # $5/M tokens (Anthropic API, 2026-04) + costPer1kTokensOutput=0.025, # $25/M tokens + speedRating=5, + qualityRating=10, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.QUALITY, + processingMode=ProcessingModeEnum.DETAILED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 10), + (OperationTypeEnum.DATA_ANALYSE, 9), + (OperationTypeEnum.DATA_GENERATE, 10), + (OperationTypeEnum.DATA_EXTRACT, 9), + (OperationTypeEnum.AGENT, 10), + (OperationTypeEnum.DATA_QUERY, 3), + ), + version="claude-opus-4-7", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.025 + ), + AiModel( + name="claude-sonnet-4-6", + displayName="Anthropic Claude Sonnet 4.6", + connectorType="anthropic", + apiUrl="https://api.anthropic.com/v1/messages", + temperature=0.2, + maxTokens=64000, + contextLength=1000000, + costPer1kTokensInput=0.003, # $3/M tokens + costPer1kTokensOutput=0.015, # $15/M tokens + speedRating=7, + qualityRating=10, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.BALANCED, + processingMode=ProcessingModeEnum.ADVANCED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 9), + (OperationTypeEnum.DATA_ANALYSE, 9), + (OperationTypeEnum.DATA_GENERATE, 9), + (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.AGENT, 9), + (OperationTypeEnum.DATA_QUERY, 9), + ), + version="claude-sonnet-4-6", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015 + ), + AiModel( + name="claude-opus-4-7", + displayName="Anthropic Claude Opus 4.7 Vision", + connectorType="anthropic", + apiUrl="https://api.anthropic.com/v1/messages", + temperature=0.2, + maxTokens=128000, + contextLength=1000000, + costPer1kTokensInput=0.005, + costPer1kTokensOutput=0.025, + speedRating=5, + qualityRating=10, + functionCall=self.callAiImage, + priority=PriorityEnum.QUALITY, + processingMode=ProcessingModeEnum.DETAILED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.IMAGE_ANALYSE, 10) + ), + version="claude-opus-4-7", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.025 + ), + AiModel( + name="claude-sonnet-4-6", + displayName="Anthropic Claude Sonnet 4.6 Vision", + connectorType="anthropic", + apiUrl="https://api.anthropic.com/v1/messages", + temperature=0.2, + maxTokens=64000, + contextLength=1000000, + costPer1kTokensInput=0.003, + costPer1kTokensOutput=0.015, + speedRating=6, + qualityRating=10, + functionCall=self.callAiImage, + priority=PriorityEnum.QUALITY, + processingMode=ProcessingModeEnum.DETAILED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.IMAGE_ANALYSE, 10) + ), + version="claude-sonnet-4-6", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015 + ), AiModel( name="claude-sonnet-4-5-20250929", displayName="Anthropic Claude Sonnet 4.5", diff --git a/modules/aicore/aicorePluginOpenai.py b/modules/aicore/aicorePluginOpenai.py index ae5a02b3..e07e85b9 100644 --- a/modules/aicore/aicorePluginOpenai.py +++ b/modules/aicore/aicorePluginOpenai.py @@ -123,6 +123,135 @@ class AiOpenai(BaseConnectorAi): version="gpt-4o", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01 ), + AiModel( + name="gpt-5.5", + displayName="OpenAI GPT-5.5", + connectorType="openai", + apiUrl="https://api.openai.com/v1/chat/completions", + temperature=0.2, + maxTokens=128000, + contextLength=1050000, + costPer1kTokensInput=0.005, # $5/M tokens (OpenAI API, 2026-04) + costPer1kTokensOutput=0.03, # $30/M tokens + speedRating=8, + qualityRating=10, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.QUALITY, + processingMode=ProcessingModeEnum.DETAILED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 10), + (OperationTypeEnum.DATA_ANALYSE, 10), + (OperationTypeEnum.DATA_GENERATE, 10), + (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.AGENT, 10), + (OperationTypeEnum.DATA_QUERY, 8), + ), + version="gpt-5.5", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.03 + ), + AiModel( + name="gpt-5.4", + displayName="OpenAI GPT-5.4", + connectorType="openai", + apiUrl="https://api.openai.com/v1/chat/completions", + temperature=0.2, + maxTokens=128000, + contextLength=1050000, + costPer1kTokensInput=0.0025, # $2.50/M tokens + costPer1kTokensOutput=0.015, # $15/M tokens + speedRating=8, + qualityRating=10, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.BALANCED, + processingMode=ProcessingModeEnum.ADVANCED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 9), + (OperationTypeEnum.DATA_ANALYSE, 10), + (OperationTypeEnum.DATA_GENERATE, 10), + (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.AGENT, 9), + (OperationTypeEnum.DATA_QUERY, 8), + ), + version="gpt-5.4", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.015 + ), + AiModel( + name="gpt-5.4-mini", + displayName="OpenAI GPT-5.4 Mini", + connectorType="openai", + apiUrl="https://api.openai.com/v1/chat/completions", + temperature=0.2, + maxTokens=128000, + contextLength=400000, + costPer1kTokensInput=0.00075, # $0.75/M tokens + costPer1kTokensOutput=0.0045, # $4.50/M tokens + speedRating=9, + qualityRating=9, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.SPEED, + processingMode=ProcessingModeEnum.BASIC, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 8), + (OperationTypeEnum.DATA_ANALYSE, 9), + (OperationTypeEnum.DATA_GENERATE, 9), + (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.AGENT, 8), + (OperationTypeEnum.DATA_QUERY, 10), + ), + version="gpt-5.4-mini", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00075 + (bytesReceived / 4 / 1000) * 0.0045 + ), + AiModel( + name="gpt-5.4-nano", + displayName="OpenAI GPT-5.4 Nano", + connectorType="openai", + apiUrl="https://api.openai.com/v1/chat/completions", + temperature=0.2, + maxTokens=128000, + contextLength=400000, + costPer1kTokensInput=0.0002, # $0.20/M tokens + costPer1kTokensOutput=0.00125, # $1.25/M tokens + speedRating=10, + qualityRating=7, + functionCall=self.callAiBasic, + functionCallStream=self.callAiBasicStream, + priority=PriorityEnum.COST, + processingMode=ProcessingModeEnum.BASIC, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.PLAN, 7), + (OperationTypeEnum.DATA_ANALYSE, 7), + (OperationTypeEnum.DATA_GENERATE, 8), + (OperationTypeEnum.DATA_EXTRACT, 9), + (OperationTypeEnum.AGENT, 7), + (OperationTypeEnum.DATA_QUERY, 10), + ), + version="gpt-5.4-nano", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0002 + (bytesReceived / 4 / 1000) * 0.00125 + ), + AiModel( + name="gpt-5.5", + displayName="OpenAI GPT-5.5 Vision", + connectorType="openai", + apiUrl="https://api.openai.com/v1/chat/completions", + temperature=0.2, + maxTokens=128000, + contextLength=1050000, + costPer1kTokensInput=0.005, + costPer1kTokensOutput=0.03, + speedRating=6, + qualityRating=10, + functionCall=self.callAiImage, + priority=PriorityEnum.QUALITY, + processingMode=ProcessingModeEnum.DETAILED, + operationTypes=createOperationTypeRatings( + (OperationTypeEnum.IMAGE_ANALYSE, 10) + ), + version="gpt-5.5", + calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.03 + ), AiModel( name="text-embedding-3-small", displayName="OpenAI Embedding Small", diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 7c56c57d..e09c43a8 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -561,29 +561,48 @@ class DatabaseConnector: f"Could not add column '{col}' to '{table}': {add_err}" ) - # Targeted type-downgrade: if a model field has been - # changed from a structured type (JSONB) to a plain - # TEXT field, alter the column so writes don't fail. - # JSONB -> TEXT is a safe, lossless cast (JSONB is - # rendered as its JSON-text representation; the - # corresponding Pydantic ``@field_validator`` is - # responsible for re-decoding legacy data on read). + # Column type migrations for existing tables. + # TEXT→DOUBLE PRECISION handles three value shapes: + # 1. NULL / empty string → NULL + # 2. ISO date(time) like "2025-01-22" or "2025-01-22T10:00:00+00" → epoch via EXTRACT + # 3. Plain numeric string like "3.14" → direct cast + _TEXT_TO_DOUBLE = ( + 'DOUBLE PRECISION USING CASE' + ' WHEN "{col}" IS NULL OR "{col}" = \'\' THEN NULL' + ' WHEN "{col}" ~ \'^\\d{4}-\\d{2}-\\d{2}\'' + ' THEN EXTRACT(EPOCH FROM "{col}"::timestamptz)' + ' ELSE NULLIF("{col}", \'\')::double precision' + ' END' + ) + _SAFE_TYPE_CHANGES = { + ("jsonb", "TEXT"): "TEXT USING \"{col}\"::text", + ("text", "DOUBLE PRECISION"): _TEXT_TO_DOUBLE, + ("text", "INTEGER"): "INTEGER USING NULLIF(\"{col}\", '')::integer", + ("timestamp without time zone", "DOUBLE PRECISION"): 'DOUBLE PRECISION USING EXTRACT(EPOCH FROM "{col}" AT TIME ZONE \'UTC\')', + ("timestamp with time zone", "DOUBLE PRECISION"): 'DOUBLE PRECISION USING EXTRACT(EPOCH FROM "{col}")', + ("date", "DOUBLE PRECISION"): 'DOUBLE PRECISION USING EXTRACT(EPOCH FROM "{col}"::timestamp AT TIME ZONE \'UTC\')', + } for col in sorted(desired_columns & existing_columns): if col == "id": continue desired_sql = (model_fields.get(col) or "").upper() currentType = existing_column_types.get(col, "") - if desired_sql == "TEXT" and currentType == "jsonb": + migration = _SAFE_TYPE_CHANGES.get((currentType, desired_sql)) + if migration: + castExpr = migration.replace("{col}", col) try: + cursor.execute('SAVEPOINT col_migrate') cursor.execute( - f'ALTER TABLE "{table}" ALTER COLUMN "{col}" TYPE TEXT USING "{col}"::text' + f'ALTER TABLE "{table}" ALTER COLUMN "{col}" TYPE {castExpr}' ) + cursor.execute('RELEASE SAVEPOINT col_migrate') logger.info( - f"Downgraded column '{col}' from JSONB to TEXT on '{table}'" + f"Migrated column '{col}' from {currentType} to {desired_sql} on '{table}'" ) except Exception as alter_err: + cursor.execute('ROLLBACK TO SAVEPOINT col_migrate') logger.warning( - f"Could not downgrade column '{col}' on '{table}': {alter_err}" + f"Could not migrate column '{col}' on '{table}': {alter_err}" ) except Exception as ensure_err: logger.warning( @@ -1096,8 +1115,15 @@ class DatabaseConnector: values.append(f"%{v}") elif op in ("gt", "gte", "lt", "lte"): sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op] - where_parts.append(f'"{key}"::TEXT {sqlOp} %s') - values.append(str(v)) + if colType in ("INTEGER", "DOUBLE PRECISION"): + try: + where_parts.append(f'"{key}"::double precision {sqlOp} %s') + values.append(float(v)) + except (ValueError, TypeError): + continue + else: + where_parts.append(f'"{key}"::TEXT {sqlOp} %s') + values.append(str(v)) elif op == "between": fromVal = v.get("from", "") if isinstance(v, dict) else "" toVal = v.get("to", "") if isinstance(v, dict) else "" @@ -1122,6 +1148,21 @@ class DatabaseConnector: toTs = _dt.strptime(str(toVal), '%Y-%m-%d').replace(hour=23, minute=59, second=59, tzinfo=_tz.utc).timestamp() where_parts.append(f'"{key}" <= %s') values.append(toTs) + elif isNumericCol: + try: + if fromVal and toVal: + where_parts.append( + f'"{key}"::double precision >= %s AND "{key}"::double precision <= %s' + ) + values.extend([float(fromVal), float(toVal)]) + elif fromVal: + where_parts.append(f'"{key}"::double precision >= %s') + values.append(float(fromVal)) + elif toVal: + where_parts.append(f'"{key}"::double precision <= %s') + values.append(float(toVal)) + except (ValueError, TypeError): + continue else: if fromVal and toVal: where_parts.append(f'"{key}"::TEXT >= %s AND "{key}"::TEXT <= %s') diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py index a581a7e8..cfc10db2 100644 --- a/modules/datamodels/datamodelAi.py +++ b/modules/datamodels/datamodelAi.py @@ -125,7 +125,7 @@ class AiModel(BaseModel): # Metadata version: Optional[str] = Field(default=None, description="Model version") - lastUpdated: Optional[str] = Field(default=None, description="Last update timestamp") + lastUpdated: Optional[float] = Field(default=None, description="Last update timestamp (UTC unix)", json_schema_extra={"frontend_type": "timestamp"}) model_config = ConfigDict(arbitrary_types_allowed=True) # Allow Callable type diff --git a/modules/datamodels/datamodelAiAudit.py b/modules/datamodels/datamodelAiAudit.py index 1ab1b360..833a175a 100644 --- a/modules/datamodels/datamodelAiAudit.py +++ b/modules/datamodels/datamodelAiAudit.py @@ -34,7 +34,7 @@ class AiAuditLogEntry(BaseModel): userId: str = Field( description="ID of the user who triggered the AI call", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) username: Optional[str] = Field( default=None, @@ -43,17 +43,17 @@ class AiAuditLogEntry(BaseModel): ) mandateId: str = Field( description="Mandate context of the call", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) featureInstanceId: Optional[str] = Field( default=None, description="Feature instance context", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) featureCode: Optional[str] = Field( default=None, description="Feature code (e.g. workspace, trustee)", - json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}}, + json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code", "labelField": "code"}}, ) instanceLabel: Optional[str] = Field( default=None, diff --git a/modules/datamodels/datamodelAudit.py b/modules/datamodels/datamodelAudit.py index 705b87e8..c3417e6a 100644 --- a/modules/datamodels/datamodelAudit.py +++ b/modules/datamodels/datamodelAudit.py @@ -100,7 +100,7 @@ class AuditLogEntry(BaseModel): timestamp: float = Field( default_factory=getUtcTimestamp, description="UTC timestamp when the event occurred", - json_schema_extra={"label": "Zeitstempel", "frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True} + json_schema_extra={"label": "Zeitstempel", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True} ) # Actor identification @@ -111,7 +111,7 @@ class AuditLogEntry(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) @@ -130,7 +130,7 @@ class AuditLogEntry(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) @@ -142,7 +142,7 @@ class AuditLogEntry(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) diff --git a/modules/datamodels/datamodelBackgroundJob.py b/modules/datamodels/datamodelBackgroundJob.py index 45a26b2c..fa99ea34 100644 --- a/modules/datamodels/datamodelBackgroundJob.py +++ b/modules/datamodels/datamodelBackgroundJob.py @@ -64,7 +64,7 @@ class BackgroundJob(PowerOnModel): description="Mandate scope (used for access checks). None for system-wide jobs.", json_schema_extra={ "label": "Mandanten-ID", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: Optional[str] = Field( @@ -72,7 +72,7 @@ class BackgroundJob(PowerOnModel): description="Feature instance scope (optional)", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) triggeredBy: Optional[str] = Field( @@ -113,18 +113,18 @@ class BackgroundJob(PowerOnModel): json_schema_extra={"label": "Fehler"}, ) - createdAt: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="When the job was submitted", - json_schema_extra={"label": "Eingereicht"}, + createdAt: float = Field( + default_factory=lambda: datetime.now(timezone.utc).timestamp(), + description="When the job was submitted (UTC unix)", + json_schema_extra={"label": "Eingereicht", "frontend_type": "timestamp"}, ) - startedAt: Optional[datetime] = Field( + startedAt: Optional[float] = Field( None, - description="When the handler began running", - json_schema_extra={"label": "Gestartet"}, + description="When the handler began running (UTC unix)", + json_schema_extra={"label": "Gestartet", "frontend_type": "timestamp"}, ) - finishedAt: Optional[datetime] = Field( + finishedAt: Optional[float] = Field( None, - description="When the handler reached a terminal status", - json_schema_extra={"label": "Beendet"}, + description="When the handler reached a terminal status (UTC unix)", + json_schema_extra={"label": "Beendet", "frontend_type": "timestamp"}, ) diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py index 2a65bcdc..8fc4fa44 100644 --- a/modules/datamodels/datamodelBase.py +++ b/modules/datamodels/datamodelBase.py @@ -46,9 +46,7 @@ class PowerOnModel(BaseModel): "frontend_required": False, "frontend_visible": False, "system": True, - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) sysModifiedAt: Optional[float] = Field( @@ -73,8 +71,6 @@ class PowerOnModel(BaseModel): "frontend_required": False, "frontend_visible": False, "system": True, - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index f662e28c..d3967f12 100644 --- a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -49,12 +49,12 @@ class BillingAccount(PowerOnModel): mandateId: str = Field( ..., description="Foreign key to Mandate", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) userId: Optional[str] = Field( None, description="Foreign key to User (None = mandate pool account, set = user audit account)", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) balance: float = Field(default=0.0, description="Current balance in CHF", json_schema_extra={"label": "Guthaben (CHF)"}) warningThreshold: float = Field( @@ -62,10 +62,10 @@ class BillingAccount(PowerOnModel): description="Warning threshold in CHF", json_schema_extra={"label": "Warnschwelle (CHF)"}, ) - lastWarningAt: Optional[datetime] = Field( + lastWarningAt: Optional[float] = Field( None, - description="Last warning sent timestamp", - json_schema_extra={"label": "Letzte Warnung"}, + description="Last warning sent timestamp (UTC unix)", + json_schema_extra={"label": "Letzte Warnung", "frontend_type": "timestamp"}, ) enabled: bool = Field(default=True, description="Account is active", json_schema_extra={"label": "Aktiv"}) @@ -81,7 +81,7 @@ class BillingTransaction(PowerOnModel): accountId: str = Field( ..., description="Foreign key to BillingAccount", - json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount"}}, + json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount", "labelField": None}}, ) transactionType: TransactionTypeEnum = Field(..., description="Transaction type", json_schema_extra={"label": "Typ"}) amount: float = Field(..., description="Amount in CHF (always positive)", json_schema_extra={"label": "Betrag (CHF)"}) @@ -100,19 +100,19 @@ class BillingTransaction(PowerOnModel): featureInstanceId: Optional[str] = Field( None, description="Feature instance ID", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) featureCode: Optional[str] = Field( None, description="Feature code (e.g., automation)", - json_schema_extra={"label": "Feature-Code", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}}, + json_schema_extra={"label": "Feature-Code", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code", "labelField": "code"}}, ) aicoreProvider: Optional[str] = Field(None, description="AICore provider (anthropic, openai, etc.)", json_schema_extra={"label": "AI-Anbieter"}) aicoreModel: Optional[str] = Field(None, description="AICore model name (e.g., claude-4-sonnet, gpt-4o)", json_schema_extra={"label": "AI-Modell"}) createdByUserId: Optional[str] = Field( None, description="User who created/caused this transaction", - json_schema_extra={"label": "Erstellt von Benutzer", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Erstellt von Benutzer", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) # AI call metadata (for per-call analytics) @@ -133,7 +133,7 @@ class BillingSettings(BaseModel): mandateId: str = Field( ..., description="Foreign key to Mandate (UNIQUE)", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) warningThresholdPercent: float = Field( @@ -158,7 +158,7 @@ class BillingSettings(BaseModel): ) rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month", json_schema_extra={"label": "Max. Nachladungen/Monat"}) rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month", json_schema_extra={"label": "Nachladungen diesen Monat"}) - monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset", json_schema_extra={"label": "Monats-Reset"}) + monthResetAt: Optional[float] = Field(None, description="When rechargesThisMonth was last reset (UTC unix)", json_schema_extra={"label": "Monats-Reset", "frontend_type": "timestamp"}) # Notifications notifyEmails: List[str] = Field( @@ -174,10 +174,10 @@ class BillingSettings(BaseModel): description="Peak indexed data volume MB this billing period", json_schema_extra={"label": "Speicher-Peak (MB)"}, ) - storagePeriodStartAt: Optional[datetime] = Field( + storagePeriodStartAt: Optional[float] = Field( None, - description="Subscription billing period start used for storage reset", - json_schema_extra={"label": "Speicher-Periodenbeginn"}, + description="Subscription billing period start used for storage reset (UTC unix)", + json_schema_extra={"label": "Speicher-Periodenbeginn", "frontend_type": "timestamp"}, ) storageBilledUpToMB: float = Field( default=0.0, @@ -193,9 +193,10 @@ class StripeWebhookEvent(BaseModel): description="Primary key", ) event_id: str = Field(..., description="Stripe event ID (evt_xxx)") - processed_at: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="When the event was processed", + processed_at: float = Field( + default_factory=lambda: datetime.now(timezone.utc).timestamp(), + description="When the event was processed (UTC unix)", + json_schema_extra={"frontend_type": "timestamp"}, ) @@ -210,10 +211,14 @@ class UsageStatistics(BaseModel): accountId: str = Field( ..., description="Foreign key to BillingAccount", - json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount"}}, + json_schema_extra={"label": "Konto-ID", "fk_target": {"db": "poweron_billing", "table": "BillingAccount", "labelField": None}}, ) periodType: PeriodTypeEnum = Field(..., description="Period type", json_schema_extra={"label": "Periodentyp"}) - periodStart: date = Field(..., description="Period start date", json_schema_extra={"label": "Periodenbeginn"}) + periodStart: date = Field( + ..., + description="Period start date", + json_schema_extra={"label": "Periodenbeginn", "frontend_type": "date"}, + ) # Aggregated values totalCostCHF: float = Field(default=0.0, description="Total cost in CHF", json_schema_extra={"label": "Gesamtkosten (CHF)"}) diff --git a/modules/datamodels/datamodelChat.py b/modules/datamodels/datamodelChat.py index e660af0a..f846b52c 100644 --- a/modules/datamodels/datamodelChat.py +++ b/modules/datamodels/datamodelChat.py @@ -16,12 +16,12 @@ class ChatLog(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"}) workflowId: str = Field( description="Foreign key to workflow", - json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}}, + json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow", "labelField": "name"}}, ) message: str = Field(description="Log message", json_schema_extra={"label": "Nachricht"}) type: str = Field(description="Log type (info, warning, error, etc.)", json_schema_extra={"label": "Typ"}) timestamp: float = Field(default_factory=getUtcTimestamp, - description="When the log entry was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"}) + description="When the log entry was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel", "frontend_type": "timestamp"}) status: Optional[str] = Field(None, description="Status of the log entry", json_schema_extra={"label": "Status"}) progress: Optional[float] = Field(None, description="Progress indicator (0.0 to 1.0)", json_schema_extra={"label": "Fortschritt"}) performance: Optional[Dict[str, Any]] = Field(None, description="Performance metrics", json_schema_extra={"label": "Leistung"}) @@ -37,11 +37,11 @@ class ChatDocument(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"}) messageId: str = Field( description="Foreign key to message", - json_schema_extra={"label": "Nachrichten-ID", "fk_target": {"db": "poweron_chat", "table": "ChatMessage"}}, + json_schema_extra={"label": "Nachrichten-ID", "fk_target": {"db": "poweron_chat", "table": "ChatMessage", "labelField": None}}, ) fileId: str = Field( description="Foreign key to file", - json_schema_extra={"label": "Datei-ID", "fk_target": {"db": "poweron_management", "table": "FileItem"}}, + json_schema_extra={"label": "Datei-ID", "fk_target": {"db": "poweron_management", "table": "FileItem", "labelField": "fileName"}}, ) fileName: str = Field(description="Name of the file", json_schema_extra={"label": "Dateiname"}) fileSize: int = Field(description="Size of the file", json_schema_extra={"label": "Dateigröße"}) @@ -81,12 +81,12 @@ class ChatMessage(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"}) workflowId: str = Field( description="Foreign key to workflow", - json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}}, + json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow", "labelField": "name"}}, ) parentMessageId: Optional[str] = Field( None, description="Parent message ID for threading", - json_schema_extra={"label": "Übergeordnete Nachrichten-ID", "fk_target": {"db": "poweron_chat", "table": "ChatMessage"}}, + json_schema_extra={"label": "Übergeordnete Nachrichten-ID", "fk_target": {"db": "poweron_chat", "table": "ChatMessage", "labelField": None}}, ) documents: List[ChatDocument] = Field(default_factory=list, description="Associated documents", json_schema_extra={"label": "Dokumente"}) documentsLabel: Optional[str] = Field(None, description="Label for the set of documents", json_schema_extra={"label": "Dokumenten-Label"}) @@ -97,7 +97,7 @@ class ChatMessage(PowerOnModel): sequenceNr: Optional[int] = Field(default=0, description="Sequence number of the message (set automatically)", json_schema_extra={"label": "Sequenznummer"}) publishedAt: Optional[float] = Field(default=None, - description="When the message was published (UTC timestamp in seconds)", json_schema_extra={"label": "Veröffentlicht am"}) + description="When the message was published (UTC timestamp in seconds)", json_schema_extra={"label": "Veröffentlicht am", "frontend_type": "timestamp"}) success: Optional[bool] = Field(None, description="Whether the message processing was successful", json_schema_extra={"label": "Erfolg"}) actionId: Optional[str] = Field(None, description="ID of the action that produced this message", json_schema_extra={"label": "Aktions-ID"}) actionMethod: Optional[str] = Field(None, description="Method of the action that produced this message", json_schema_extra={"label": "Aktionsmethode"}) @@ -125,7 +125,7 @@ class ChatWorkflow(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) linkedWorkflowId: Optional[str] = Field( @@ -219,7 +219,7 @@ class UserInputRequest(BaseModel): workflowId: Optional[str] = Field( None, description="Optional ID of the workflow to continue", - json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}}, + json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow", "labelField": "name"}}, ) allowedProviders: Optional[List[str]] = Field(None, description="List of allowed AI providers (multiselect)", json_schema_extra={"label": "Erlaubte Anbieter"}) @@ -281,8 +281,8 @@ class ObservationPreview(BaseModel): # Extended metadata fields mimeType: Optional[str] = Field(default=None, description="MIME type", json_schema_extra={"label": "MIME-Typ"}) size: Optional[str] = Field(default=None, description="File size", json_schema_extra={"label": "Größe"}) - created: Optional[str] = Field(default=None, description="Creation timestamp", json_schema_extra={"label": "Erstellt"}) - modified: Optional[str] = Field(default=None, description="Modification timestamp", json_schema_extra={"label": "Geändert"}) + created: Optional[float] = Field(default=None, description="Creation timestamp (UTC unix)", json_schema_extra={"label": "Erstellt", "frontend_type": "timestamp"}) + modified: Optional[float] = Field(default=None, description="Modification timestamp (UTC unix)", json_schema_extra={"label": "Geändert", "frontend_type": "timestamp"}) typeGroup: Optional[str] = Field(default=None, description="Document type group", json_schema_extra={"label": "Typgruppe"}) documentId: Optional[str] = Field(default=None, description="Document ID", json_schema_extra={"label": "Dokument-ID"}) reference: Optional[str] = Field(default=None, description="Document reference", json_schema_extra={"label": "Referenz"}) @@ -332,7 +332,7 @@ class ActionItem(BaseModel): retryCount: int = Field(default=0, description="Number of retries attempted", json_schema_extra={"label": "Wiederholungen"}) retryMax: int = Field(default=3, description="Maximum number of retries", json_schema_extra={"label": "Max. Wiederholungen"}) processingTime: Optional[float] = Field(None, description="Processing time in seconds", json_schema_extra={"label": "Bearbeitungszeit"}) - timestamp: float = Field(..., description="When the action was executed (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"}) + timestamp: float = Field(..., description="When the action was executed (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel", "frontend_type": "timestamp"}) result: Optional[str] = Field(None, description="Result of the action", json_schema_extra={"label": "Ergebnis"}) def setSuccess(self, result: str = None) -> None: @@ -361,13 +361,13 @@ class TaskItem(BaseModel): workflowId: str = Field( ..., description="Workflow ID", - json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}}, + json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow", "labelField": "name"}}, ) userInput: str = Field(..., description="User input that triggered the task", json_schema_extra={"label": "Benutzereingabe"}) status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status", json_schema_extra={"label": "Status"}) error: Optional[str] = Field(None, description="Error message if task failed", json_schema_extra={"label": "Fehler"}) - startedAt: Optional[float] = Field(None, description="When the task started (UTC timestamp in seconds)", json_schema_extra={"label": "Gestartet am"}) - finishedAt: Optional[float] = Field(None, description="When the task finished (UTC timestamp in seconds)", json_schema_extra={"label": "Beendet am"}) + startedAt: Optional[float] = Field(None, description="When the task started (UTC timestamp in seconds)", json_schema_extra={"label": "Gestartet am", "frontend_type": "timestamp"}) + finishedAt: Optional[float] = Field(None, description="When the task finished (UTC timestamp in seconds)", json_schema_extra={"label": "Beendet am", "frontend_type": "timestamp"}) actionList: List[ActionItem] = Field(default_factory=list, description="List of actions to execute", json_schema_extra={"label": "Aktionen"}) retryCount: int = Field(default=0, description="Number of retries attempted", json_schema_extra={"label": "Wiederholungen"}) retryMax: int = Field(default=3, description="Maximum number of retries", json_schema_extra={"label": "Max. Wiederholungen"}) @@ -402,7 +402,7 @@ class TaskHandover(BaseModel): improvements: List[str] = Field(default_factory=list, description="Improvement suggestions", json_schema_extra={"label": "Verbesserungen"}) workflowSummary: Optional[str] = Field(None, description="Summarized workflow context", json_schema_extra={"label": "Workflow-Zusammenfassung"}) messageHistory: List[str] = Field(default_factory=list, description="Key message summaries", json_schema_extra={"label": "Nachrichtenverlauf"}) - timestamp: float = Field(..., description="When the handover was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"}) + timestamp: float = Field(..., description="When the handover was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel", "frontend_type": "timestamp"}) handoverType: str = Field(default="task", description="Type of handover: task, phase, or workflow", json_schema_extra={"label": "Übergabetyp"}) class TaskContext(BaseModel): diff --git a/modules/datamodels/datamodelContent.py b/modules/datamodels/datamodelContent.py index fc9dc4b6..c28036cf 100644 --- a/modules/datamodels/datamodelContent.py +++ b/modules/datamodels/datamodelContent.py @@ -34,7 +34,7 @@ class ContentObject(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4())) fileId: str = Field( description="FK to the physical file", - json_schema_extra={"fk_target": {"db": "poweron_management", "table": "FileItem"}}, + json_schema_extra={"fk_target": {"db": "poweron_management", "table": "FileItem", "labelField": "fileName"}}, ) contentType: str = Field(description="text, image, videostream, audiostream, other") data: str = Field(default="", description="Content data (text, base64, URL)") diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py index cad125ef..10d2976c 100644 --- a/modules/datamodels/datamodelDataSource.py +++ b/modules/datamodels/datamodelDataSource.py @@ -23,7 +23,7 @@ class DataSource(PowerOnModel): ) connectionId: str = Field( description="FK to UserConnection", - json_schema_extra={"label": "Verbindungs-ID", "fk_target": {"db": "poweron_app", "table": "UserConnection"}}, + json_schema_extra={"label": "Verbindungs-ID", "fk_target": {"db": "poweron_app", "table": "UserConnection", "labelField": "externalUsername"}}, ) sourceType: str = Field( description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)", @@ -45,17 +45,17 @@ class DataSource(PowerOnModel): featureInstanceId: Optional[str] = Field( default=None, description="Scoped to feature instance", - json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) mandateId: Optional[str] = Field( default=None, description="Mandate scope", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) userId: str = Field( default="", description="Owner user ID", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) autoSync: bool = Field( default=False, @@ -65,7 +65,7 @@ class DataSource(PowerOnModel): lastSynced: Optional[float] = Field( default=None, description="Last sync timestamp", - json_schema_extra={"label": "Letzter Sync"}, + json_schema_extra={"label": "Letzter Sync", "frontend_type": "timestamp"}, ) scope: str = Field( default="personal", @@ -91,5 +91,9 @@ class ExternalEntry(BaseModel): isFolder: bool = Field(default=False, description="True if directory/folder") size: Optional[int] = Field(default=None, description="File size in bytes") mimeType: Optional[str] = Field(default=None, description="MIME type (files only)") - lastModified: Optional[float] = Field(default=None, description="Last modification timestamp") + lastModified: Optional[float] = Field( + default=None, + description="Last modification timestamp", + json_schema_extra={"frontend_type": "timestamp"}, + ) metadata: Dict[str, Any] = Field(default_factory=dict, description="Provider-specific metadata") diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py index 96b574a6..dd2c4035 100644 --- a/modules/datamodels/datamodelFeatureDataSource.py +++ b/modules/datamodels/datamodelFeatureDataSource.py @@ -23,11 +23,11 @@ class FeatureDataSource(PowerOnModel): ) featureInstanceId: str = Field( description="FK to FeatureInstance", - json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) featureCode: str = Field( description="Feature code (e.g. trustee, commcoach)", - json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}}, + json_schema_extra={"label": "Feature", "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code", "labelField": "code"}}, ) tableName: str = Field( description="Table name from DATA_OBJECTS meta (e.g. TrusteePosition)", @@ -44,16 +44,16 @@ class FeatureDataSource(PowerOnModel): mandateId: str = Field( default="", description="Mandate scope", - json_schema_extra={"label": "Mandant", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandant", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) userId: str = Field( default="", description="Owner user ID", - json_schema_extra={"label": "Benutzer", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) workspaceInstanceId: str = Field( description="Workspace feature instance where this source is used", - json_schema_extra={"label": "Workspace", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Workspace", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) scope: str = Field( default="personal", diff --git a/modules/datamodels/datamodelFeatures.py b/modules/datamodels/datamodelFeatures.py index e8e51370..138ab0dd 100644 --- a/modules/datamodels/datamodelFeatures.py +++ b/modules/datamodels/datamodelFeatures.py @@ -43,7 +43,7 @@ class FeatureInstance(PowerOnModel): "frontend_type": "select", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code"}, + "fk_target": {"db": "poweron_app", "table": "Feature", "column": "code", "labelField": "code"}, }, ) mandateId: str = Field( @@ -53,7 +53,7 @@ class FeatureInstance(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) label: str = Field( diff --git a/modules/datamodels/datamodelFileFolder.py b/modules/datamodels/datamodelFileFolder.py index e3d2ce87..4829385e 100644 --- a/modules/datamodels/datamodelFileFolder.py +++ b/modules/datamodels/datamodelFileFolder.py @@ -29,7 +29,7 @@ class FileFolder(PowerOnModel): "frontend_type": "text", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_management", "table": "FileFolder"}, + "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"}, }, ) mandateId: Optional[str] = Field( @@ -40,7 +40,7 @@ class FileFolder(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: Optional[str] = Field( @@ -51,7 +51,7 @@ class FileFolder(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) scope: str = Field( diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py index d9b78ddf..82628e0c 100644 --- a/modules/datamodels/datamodelFiles.py +++ b/modules/datamodels/datamodelFiles.py @@ -30,9 +30,7 @@ class FileItem(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_model": "Mandate", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: Optional[str] = Field( @@ -43,9 +41,7 @@ class FileItem(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_model": "FeatureInstance", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) mimeType: str = Field( @@ -80,7 +76,7 @@ class FileItem(PowerOnModel): "frontend_type": "text", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_management", "table": "FileFolder"}, + "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"}, }, ) description: Optional[str] = Field( diff --git a/modules/datamodels/datamodelInvitation.py b/modules/datamodels/datamodelInvitation.py index 57efb9bb..befb6ae9 100644 --- a/modules/datamodels/datamodelInvitation.py +++ b/modules/datamodels/datamodelInvitation.py @@ -37,7 +37,7 @@ class Invitation(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: Optional[str] = Field( @@ -48,7 +48,7 @@ class Invitation(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) roleIds: List[str] = Field( @@ -80,7 +80,7 @@ class Invitation(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) usedAt: Optional[float] = Field( diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index e440d657..163328a4 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -30,17 +30,17 @@ class FileContentIndex(PowerOnModel): ) userId: str = Field( description="Owner user ID", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) featureInstanceId: str = Field( default="", description="Feature instance scope", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) mandateId: str = Field( default="", description="Mandate scope", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) fileName: str = Field( description="Original file name", @@ -78,7 +78,7 @@ class FileContentIndex(PowerOnModel): extractedAt: float = Field( default_factory=getUtcTimestamp, description="Extraction timestamp", - json_schema_extra={"label": "Extrahiert am"}, + json_schema_extra={"label": "Extrahiert am", "frontend_type": "timestamp"}, ) status: str = Field( default="pending", @@ -116,16 +116,16 @@ class ContentChunk(PowerOnModel): ) fileId: str = Field( description="FK to the source file", - json_schema_extra={"label": "Datei-ID", "fk_target": {"db": "poweron_management", "table": "FileItem"}}, + json_schema_extra={"label": "Datei-ID", "fk_target": {"db": "poweron_management", "table": "FileItem", "labelField": "fileName"}}, ) userId: str = Field( description="Owner user ID", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) featureInstanceId: str = Field( default="", description="Feature instance scope", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) contentType: str = Field( description="Content type: text, image, videostream, audiostream, other", @@ -214,16 +214,16 @@ class WorkflowMemory(PowerOnModel): ) workflowId: str = Field( description="FK to the workflow", - json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow"}}, + json_schema_extra={"label": "Workflow-ID", "fk_target": {"db": "poweron_chat", "table": "ChatWorkflow", "labelField": "name"}}, ) userId: str = Field( description="Owner user ID", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) featureInstanceId: str = Field( default="", description="Feature instance scope", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) key: str = Field( description="Key identifier (e.g. 'entity:companyName')", diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py index 5c7280d0..97f865d6 100644 --- a/modules/datamodels/datamodelMembership.py +++ b/modules/datamodels/datamodelMembership.py @@ -31,9 +31,7 @@ class UserMandate(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) mandateId: str = Field( @@ -43,9 +41,7 @@ class UserMandate(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "Mandate", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) enabled: bool = Field( @@ -73,9 +69,7 @@ class FeatureAccess(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) featureInstanceId: str = Field( @@ -85,9 +79,7 @@ class FeatureAccess(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "FeatureInstance", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) enabled: bool = Field( @@ -115,7 +107,7 @@ class UserMandateRole(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "UserMandate"}, + "fk_target": {"db": "poweron_app", "table": "UserMandate", "labelField": None}, }, ) roleId: str = Field( @@ -125,9 +117,7 @@ class UserMandateRole(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "Role", - "fk_label_field": "roleLabel", - "fk_target": {"db": "poweron_app", "table": "Role"}, + "fk_target": {"db": "poweron_app", "table": "Role", "labelField": "roleLabel"}, }, ) @@ -150,7 +140,7 @@ class FeatureAccessRole(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureAccess"}, + "fk_target": {"db": "poweron_app", "table": "FeatureAccess", "labelField": None}, }, ) roleId: str = Field( @@ -160,8 +150,6 @@ class FeatureAccessRole(PowerOnModel): "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, - "fk_model": "Role", - "fk_label_field": "roleLabel", - "fk_target": {"db": "poweron_app", "table": "Role"}, + "fk_target": {"db": "poweron_app", "table": "Role", "labelField": "roleLabel"}, }, ) diff --git a/modules/datamodels/datamodelMessaging.py b/modules/datamodels/datamodelMessaging.py index 87845da8..9fcb9944 100644 --- a/modules/datamodels/datamodelMessaging.py +++ b/modules/datamodels/datamodelMessaging.py @@ -64,7 +64,7 @@ class MessagingSubscription(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -74,7 +74,7 @@ class MessagingSubscription(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) description: Optional[str] = Field( @@ -131,7 +131,7 @@ class MessagingSubscriptionRegistration(BaseModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -141,7 +141,7 @@ class MessagingSubscriptionRegistration(BaseModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) subscriptionId: str = Field( @@ -160,7 +160,7 @@ class MessagingSubscriptionRegistration(BaseModel): "frontend_readonly": True, "frontend_required": False, "label": "Benutzer-ID", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) channel: MessagingChannel = Field( @@ -249,7 +249,7 @@ class MessagingDelivery(BaseModel): "frontend_readonly": True, "frontend_required": False, "label": "Benutzer-ID", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) channel: MessagingChannel = Field( @@ -296,7 +296,7 @@ class MessagingDelivery(BaseModel): default=None, description="When the delivery was sent (UTC timestamp in seconds)", json_schema_extra={ - "frontend_type": "datetime", + "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Gesendet am", diff --git a/modules/datamodels/datamodelNotification.py b/modules/datamodels/datamodelNotification.py index 3a8fb631..535e6a65 100644 --- a/modules/datamodels/datamodelNotification.py +++ b/modules/datamodels/datamodelNotification.py @@ -65,7 +65,7 @@ class UserNotification(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py index 45aa76a7..83a4525d 100644 --- a/modules/datamodels/datamodelRbac.py +++ b/modules/datamodels/datamodelRbac.py @@ -63,9 +63,7 @@ class Role(PowerOnModel): "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, - "fk_model": "Mandate", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: Optional[str] = Field( @@ -77,9 +75,7 @@ class Role(PowerOnModel): "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, - "fk_model": "FeatureInstance", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) featureCode: Optional[str] = Field( @@ -115,9 +111,7 @@ class AccessRule(PowerOnModel): "frontend_type": "select", "frontend_readonly": True, "frontend_required": True, - "fk_model": "Role", - "fk_label_field": "roleLabel", - "fk_target": {"db": "poweron_app", "table": "Role"}, + "fk_target": {"db": "poweron_app", "table": "Role", "labelField": "roleLabel"}, }, ) context: AccessRuleContext = Field( diff --git a/modules/datamodels/datamodelSecurity.py b/modules/datamodels/datamodelSecurity.py index cd48fb08..1240f088 100644 --- a/modules/datamodels/datamodelSecurity.py +++ b/modules/datamodels/datamodelSecurity.py @@ -47,7 +47,7 @@ class Token(PowerOnModel): ) userId: str = Field( ..., - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) authority: AuthAuthority = Field( ..., @@ -56,7 +56,7 @@ class Token(PowerOnModel): connectionId: Optional[str] = Field( None, description="ID of the connection this token belongs to", - json_schema_extra={"label": "Verbindungs-ID", "fk_target": {"db": "poweron_app", "table": "UserConnection"}}, + json_schema_extra={"label": "Verbindungs-ID", "fk_target": {"db": "poweron_app", "table": "UserConnection", "labelField": "externalUsername"}}, ) tokenPurpose: Optional[TokenPurpose] = Field( default=None, @@ -73,7 +73,7 @@ class Token(PowerOnModel): ) expiresAt: float = Field( description="When the token expires (UTC timestamp in seconds)", - json_schema_extra={"label": "Laeuft ab am"}, + json_schema_extra={"label": "Laeuft ab am", "frontend_type": "timestamp"}, ) tokenRefresh: Optional[str] = Field( default=None, @@ -87,12 +87,12 @@ class Token(PowerOnModel): revokedAt: Optional[float] = Field( None, description="When the token was revoked (UTC timestamp in seconds)", - json_schema_extra={"label": "Widerrufen am"}, + json_schema_extra={"label": "Widerrufen am", "frontend_type": "timestamp"}, ) revokedBy: Optional[str] = Field( None, description="User ID who revoked the token (admin/self)", - json_schema_extra={"label": "Widerrufen von", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Widerrufen von", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) reason: Optional[str] = Field( None, @@ -139,7 +139,7 @@ class AuthEvent(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) eventType: str = Field( @@ -149,7 +149,7 @@ class AuthEvent(PowerOnModel): timestamp: float = Field( default_factory=getUtcTimestamp, description="Unix timestamp when the event occurred", - json_schema_extra={"label": "Zeitstempel", "frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True}, + json_schema_extra={"label": "Zeitstempel", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True}, ) ipAddress: Optional[str] = Field( default=None, diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 46ce1f31..847285cd 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -207,7 +207,7 @@ class MandateSubscription(PowerOnModel): mandateId: str = Field( ..., description="Foreign key to Mandate", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) planKey: str = Field( ..., @@ -226,35 +226,35 @@ class MandateSubscription(PowerOnModel): json_schema_extra={"label": "Wiederkehrend"}, ) - startedAt: datetime = Field( - default_factory=lambda: datetime.now(timezone.utc), - description="Record creation timestamp", - json_schema_extra={"label": "Gestartet"}, + startedAt: float = Field( + default_factory=lambda: datetime.now(timezone.utc).timestamp(), + description="Record creation timestamp (UTC unix)", + json_schema_extra={"label": "Gestartet", "frontend_type": "timestamp"}, ) - effectiveFrom: Optional[datetime] = Field( + effectiveFrom: Optional[float] = Field( None, - description="When this subscription becomes operative. None = immediate. Set for SCHEDULED subs.", - json_schema_extra={"label": "Wirksam ab"}, + description="When this subscription becomes operative (UTC unix). None = immediate.", + json_schema_extra={"label": "Wirksam ab", "frontend_type": "timestamp"}, ) - endedAt: Optional[datetime] = Field( + endedAt: Optional[float] = Field( None, - description="When subscription ended (terminal)", - json_schema_extra={"label": "Beendet"}, + description="When subscription ended (UTC unix)", + json_schema_extra={"label": "Beendet", "frontend_type": "timestamp"}, ) - currentPeriodStart: Optional[datetime] = Field( + currentPeriodStart: Optional[float] = Field( None, - description="Current billing period start (synced from Stripe)", - json_schema_extra={"label": "Periodenbeginn"}, + description="Current billing period start (UTC unix, synced from Stripe)", + json_schema_extra={"label": "Periodenbeginn", "frontend_type": "timestamp"}, ) - currentPeriodEnd: Optional[datetime] = Field( + currentPeriodEnd: Optional[float] = Field( None, - description="Current billing period end (synced from Stripe)", - json_schema_extra={"label": "Periodenende"}, + description="Current billing period end (UTC unix, synced from Stripe)", + json_schema_extra={"label": "Periodenende", "frontend_type": "timestamp"}, ) - trialEndsAt: Optional[datetime] = Field( + trialEndsAt: Optional[float] = Field( None, - description="Trial expiry timestamp", - json_schema_extra={"label": "Trial endet"}, + description="Trial expiry timestamp (UTC unix)", + json_schema_extra={"label": "Trial endet", "frontend_type": "timestamp"}, ) snapshotPricePerUserCHF: float = Field( diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 5cfb4c37..86d55a02 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -397,9 +397,7 @@ class UserConnection(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Benutzer-ID", - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) authority: AuthAuthority = Field( @@ -648,7 +646,7 @@ class UserInDB(User): resetTokenExpires: Optional[float] = Field( None, description="Reset token expiration (UTC timestamp in seconds)", - json_schema_extra={"label": "Token läuft ab"}, + json_schema_extra={"label": "Token läuft ab", "frontend_type": "timestamp"}, ) @@ -689,12 +687,12 @@ class UserVoicePreferences(PowerOnModel): ) userId: str = Field( description="User ID", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) mandateId: Optional[str] = Field( default=None, description="Mandate scope (None = global for user)", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) sttLanguage: str = Field( default="de-DE", diff --git a/modules/datamodels/datamodelUdm.py b/modules/datamodels/datamodelUdm.py index 794b71f0..c91baa90 100644 --- a/modules/datamodels/datamodelUdm.py +++ b/modules/datamodels/datamodelUdm.py @@ -14,8 +14,8 @@ from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart class UdmMetadata(BaseModel): title: Optional[str] = None author: Optional[str] = None - createdAt: Optional[str] = None - modifiedAt: Optional[str] = None + createdAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) + modifiedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) sourcePath: str = "" tags: List[str] = Field(default_factory=list) custom: Dict[str, Any] = Field(default_factory=dict) diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py index 0c1bb8c6..0bd0ed71 100644 --- a/modules/datamodels/datamodelUtils.py +++ b/modules/datamodels/datamodelUtils.py @@ -27,9 +27,7 @@ class Prompt(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_model": "Mandate", - "fk_label_field": "label", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) isSystem: bool = Field( diff --git a/modules/datamodels/datamodelViews.py b/modules/datamodels/datamodelViews.py new file mode 100644 index 00000000..aca32f56 --- /dev/null +++ b/modules/datamodels/datamodelViews.py @@ -0,0 +1,199 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +View models for the /api/attributes/ endpoint. + +These extend base DB models with computed / enriched fields that the gateway +adds at response time (JOINs, aggregations, synthetics). They are NEVER used +for DB operations — only for ``getModelAttributeDefinitions()`` so the frontend +can resolve column types via ``resolveColumnTypes`` without hardcoding. + +Naming convention: ``{BaseModel}View``. + +``getModelClasses()`` in ``attributeUtils.py`` auto-discovers every +``datamodel*.py`` under ``modules/datamodels/`` — so placing them here is +sufficient for registration. +""" + +from typing import Optional, List +from pydantic import Field + +from modules.datamodels.datamodelBase import MODEL_REGISTRY, PowerOnModel +from modules.datamodels.datamodelMembership import UserMandate, FeatureAccess +from modules.datamodels.datamodelBilling import BillingTransaction +from modules.datamodels.datamodelSubscription import MandateSubscription +from modules.datamodels.datamodelUiLanguage import UiLanguageSet +from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes +from modules.shared.i18nRegistry import i18nModel + + +# ============================================================================ +# Punkt 1a: UserMandate + enriched user fields +# ============================================================================ + +@i18nModel("Benutzer-Mandant (Ansicht)") +class UserMandateView(UserMandate): + """UserMandate erweitert um aufgeloeste Benutzerfelder und Rollenlabels.""" + + username: Optional[str] = Field( + default=None, + description="Username (resolved from userId)", + json_schema_extra={"label": "Benutzername", "frontend_type": "text", "frontend_readonly": True}, + ) + email: Optional[str] = Field( + default=None, + description="E-Mail address (resolved from userId)", + json_schema_extra={"label": "E-Mail", "frontend_type": "text", "frontend_readonly": True}, + ) + fullName: Optional[str] = Field( + default=None, + description="Full name (resolved from userId)", + json_schema_extra={"label": "Vollstaendiger Name", "frontend_type": "text", "frontend_readonly": True}, + ) + roleLabels: Optional[List[str]] = Field( + default=None, + description="Role labels (resolved from junction table)", + json_schema_extra={"label": "Rollen", "frontend_type": "text", "frontend_readonly": True}, + ) + + +# ============================================================================ +# Punkt 1b: FeatureAccess + enriched user fields +# ============================================================================ + +@i18nModel("Feature-Zugang (Ansicht)") +class FeatureAccessView(FeatureAccess): + """FeatureAccess erweitert um aufgeloeste Benutzerfelder und Rollenlabels.""" + + username: Optional[str] = Field( + default=None, + description="Username (resolved from userId)", + json_schema_extra={"label": "Benutzername", "frontend_type": "text", "frontend_readonly": True}, + ) + email: Optional[str] = Field( + default=None, + description="E-Mail address (resolved from userId)", + json_schema_extra={"label": "E-Mail", "frontend_type": "text", "frontend_readonly": True}, + ) + fullName: Optional[str] = Field( + default=None, + description="Full name (resolved from userId)", + json_schema_extra={"label": "Vollstaendiger Name", "frontend_type": "text", "frontend_readonly": True}, + ) + roleLabels: Optional[List[str]] = Field( + default=None, + description="Role labels (resolved from junction table)", + json_schema_extra={"label": "Rollen", "frontend_type": "text", "frontend_readonly": True}, + ) + + +# ============================================================================ +# Punkt 1d: BillingTransaction + enriched mandate/user names +# ============================================================================ + +@i18nModel("Transaktion (Ansicht)") +class BillingTransactionView(BillingTransaction): + """BillingTransaction erweitert um aufgeloeste Mandanten-/Benutzernamen.""" + + mandateName: Optional[str] = Field( + default=None, + description="Mandate name (resolved from accountId/mandateId)", + json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True}, + ) + userName: Optional[str] = Field( + default=None, + description="User name (resolved from createdByUserId)", + json_schema_extra={"label": "Benutzer", "frontend_type": "text", "frontend_readonly": True}, + ) + + +# ============================================================================ +# Punkt 3a: MandateSubscription + aggregated fields +# ============================================================================ + +@i18nModel("Abonnement (Ansicht)") +class MandateSubscriptionView(MandateSubscription): + """MandateSubscription erweitert um aggregierte Laufzeitwerte.""" + + mandateName: Optional[str] = Field( + default=None, + description="Mandate name (resolved from mandateId)", + json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True}, + ) + planTitle: Optional[str] = Field( + default=None, + description="Plan title (resolved from planKey)", + json_schema_extra={"label": "Plan", "frontend_type": "text", "frontend_readonly": True}, + ) + activeUsers: Optional[int] = Field( + default=None, + description="Number of active users in the mandate", + json_schema_extra={"label": "Benutzer", "frontend_type": "number", "frontend_readonly": True}, + ) + activeInstances: Optional[int] = Field( + default=None, + description="Number of active feature instances in the mandate", + json_schema_extra={"label": "Module", "frontend_type": "number", "frontend_readonly": True}, + ) + monthlyRevenueCHF: Optional[float] = Field( + default=None, + description="Calculated monthly revenue in CHF", + json_schema_extra={"label": "Umsatz pro Monat", "frontend_type": "number", "frontend_readonly": True}, + ) + + +# ============================================================================ +# Punkt 3b: UiLanguageSet + computed counts +# ============================================================================ + +@i18nModel("Sprachset (Ansicht)") +class UiLanguageSetView(UiLanguageSet): + """UiLanguageSet erweitert um berechnete Uebersetzungszaehler.""" + + uiCount: Optional[int] = Field( + default=None, + description="Number of UI translation entries", + json_schema_extra={"label": "UI", "frontend_type": "number", "frontend_readonly": True}, + ) + gatewayCount: Optional[int] = Field( + default=None, + description="Number of gateway/API translation entries", + json_schema_extra={"label": "API", "frontend_type": "number", "frontend_readonly": True}, + ) + entriesCount: Optional[int] = Field( + default=None, + description="Total number of translation entries", + json_schema_extra={"label": "Gesamt", "frontend_type": "number", "frontend_readonly": True}, + ) + + +# ============================================================================ +# Punkt 1c: DataNeutralizerAttributes + enriched fields +# +# DataNeutralizerAttributes extends BaseModel (not PowerOnModel), so its +# subclass does NOT auto-register in MODEL_REGISTRY. We register manually. +# ============================================================================ + +@i18nModel("Neutralisierungs-Zuordnung (Ansicht)") +class DataNeutralizerAttributesView(DataNeutralizerAttributes): + """DataNeutralizerAttributes erweitert um synthetische/aufgeloeste Felder.""" + + placeholder: Optional[str] = Field( + default=None, + description="Synthetic placeholder string [patternType.id]", + json_schema_extra={"label": "Platzhalter", "frontend_type": "text", "frontend_readonly": True}, + ) + username: Optional[str] = Field( + default=None, + description="Username (resolved from userId)", + json_schema_extra={"label": "Benutzer", "frontend_type": "text", "frontend_readonly": True}, + ) + instanceLabel: Optional[str] = Field( + default=None, + description="Feature instance label (resolved from featureInstanceId)", + json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True}, + ) + + +# Manual registration for non-PowerOnModel view +MODEL_REGISTRY["DataNeutralizerAttributesView"] = DataNeutralizerAttributesView # type: ignore[assignment] diff --git a/modules/demoConfigs/pwgDemo2026.py b/modules/demoConfigs/pwgDemo2026.py index f80760f9..f0dc5e6d 100644 --- a/modules/demoConfigs/pwgDemo2026.py +++ b/modules/demoConfigs/pwgDemo2026.py @@ -503,11 +503,12 @@ class PwgDemo2026(_BaseDemoConfig): if monthlyRent <= 0: continue for month in range(1, 13): - bookingDate = f"{year}-{month:02d}-01" + from datetime import datetime as _dtCls, timezone as _tzCls + bookingTs = _dtCls(year, month, 1, tzinfo=_tzCls.utc).timestamp() entryRef = f"PWG-{tenant.get('contactNumber')}-{year}{month:02d}" entry = TrusteeDataJournalEntry( externalId=entryRef, - bookingDate=bookingDate, + bookingDate=bookingTs, reference=entryRef, description=f"Mietzins {month:02d}/{year} {name}", currency="CHF", diff --git a/modules/features/chatbot/routeFeatureChatbot.py b/modules/features/chatbot/routeFeatureChatbot.py index 4ee82fc5..6b79287b 100644 --- a/modules/features/chatbot/routeFeatureChatbot.py +++ b/modules/features/chatbot/routeFeatureChatbot.py @@ -35,17 +35,6 @@ from modules.features.chatbot.mainChatbot import getEventManager from modules.shared.i18nRegistry import apiRouteContext routeApiMsg = apiRouteContext("routeFeatureChatbot") -# Pre-warm AI connectors when this router loads (before first request). -# Ensures connectors are ready; avoids 4–8 s delay on first chatbot message. -try: - import modules.aicore.aicoreModelRegistry # noqa: F401 - from modules.aicore.aicoreModelRegistry import modelRegistry - modelRegistry.ensureConnectorsRegistered() - modelRegistry.refreshModels(force=True) - logging.getLogger(__name__).info("Chatbot router: AI connectors pre-warmed") -except Exception as e: - logging.getLogger(__name__).warning(f"Chatbot AI pre-warm failed: {e}") - # Configure logger logger = logging.getLogger(__name__) diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py index 82be6044..afc14df5 100644 --- a/modules/features/commcoach/datamodelCommcoach.py +++ b/modules/features/commcoach/datamodelCommcoach.py @@ -90,7 +90,7 @@ class CoachingContext(PowerOnModel): metadata: Optional[str] = Field(default=None, description="JSON object with flexible metadata") sessionCount: int = Field(default=0) taskCount: int = Field(default=0) - lastSessionAt: Optional[str] = Field(default=None) + lastSessionAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) rollingOverview: Optional[str] = Field(default=None, description="AI summary of older sessions for long context history") rollingOverviewUpToSessionCount: Optional[int] = Field(default=None, description="Session count covered by rollingOverview") @@ -113,8 +113,8 @@ class CoachingSession(PowerOnModel): messageCount: int = Field(default=0) competenceScore: Optional[float] = Field(default=None, ge=0.0, le=100.0) emailSent: bool = Field(default=False) - startedAt: Optional[str] = Field(default=None) - endedAt: Optional[str] = Field(default=None) + startedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) + endedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) class CoachingMessage(PowerOnModel): @@ -141,8 +141,8 @@ class CoachingTask(PowerOnModel): description: Optional[str] = Field(default=None) status: CoachingTaskStatus = Field(default=CoachingTaskStatus.OPEN) priority: CoachingTaskPriority = Field(default=CoachingTaskPriority.MEDIUM) - dueDate: Optional[str] = Field(default=None) - completedAt: Optional[str] = Field(default=None) + dueDate: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "date"}) + completedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) class CoachingScore(PowerOnModel): @@ -171,7 +171,7 @@ class CoachingUserProfile(PowerOnModel): longestStreak: int = Field(default=0) totalSessions: int = Field(default=0) totalMinutes: int = Field(default=0) - lastSessionAt: Optional[str] = Field(default=None) + lastSessionAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) # ============================================================================ @@ -204,7 +204,7 @@ class CoachingBadge(PowerOnModel): mandateId: str = Field(description="Mandate ID") instanceId: str = Field(description="Feature instance ID") badgeKey: str = Field(description="Badge identifier, e.g. 'streak_7'") - awardedAt: Optional[str] = Field(default=None) + awardedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) # ============================================================================ @@ -238,14 +238,14 @@ class CreateTaskRequest(BaseModel): title: str description: Optional[str] = None priority: Optional[CoachingTaskPriority] = CoachingTaskPriority.MEDIUM - dueDate: Optional[str] = None + dueDate: Optional[float] = None class UpdateTaskRequest(BaseModel): title: Optional[str] = None description: Optional[str] = None priority: Optional[CoachingTaskPriority] = None - dueDate: Optional[str] = None + dueDate: Optional[float] = None class UpdateTaskStatusRequest(BaseModel): diff --git a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py index 2a3f3d12..e4485591 100644 --- a/modules/features/commcoach/interfaceFeatureCommcoach.py +++ b/modules/features/commcoach/interfaceFeatureCommcoach.py @@ -12,7 +12,7 @@ from typing import Dict, Any, List, Optional from modules.datamodels.datamodelUam import User from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.shared.dbRegistry import registerDatabase -from modules.shared.timeUtils import getIsoTimestamp +from modules.shared.timeUtils import getIsoTimestamp, getUtcTimestamp from modules.shared.configuration import APP_CONFIG from modules.shared.i18nRegistry import resolveText, t @@ -112,7 +112,7 @@ class CommcoachObjects: CoachingSession, recordFilter={"contextId": contextId, "userId": userId}, ) - records.sort(key=lambda r: r.get("startedAt") or r.get("createdAt") or "", reverse=True) + records.sort(key=lambda r: r.get("startedAt") or 0, reverse=True) return records def getSession(self, sessionId: str) -> Optional[Dict[str, Any]]: @@ -129,7 +129,7 @@ class CommcoachObjects: def createSession(self, data: Dict[str, Any]) -> Dict[str, Any]: data["createdAt"] = getIsoTimestamp() data["updatedAt"] = getIsoTimestamp() - data["startedAt"] = getIsoTimestamp() + data["startedAt"] = getUtcTimestamp() return self.db.recordCreate(CoachingSession, data) def updateSession(self, sessionId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: @@ -281,7 +281,7 @@ class CommcoachObjects: def getBadges(self, userId: str, instanceId: str) -> List[Dict[str, Any]]: from .datamodelCommcoach import CoachingBadge records = self.db.getRecordset(CoachingBadge, recordFilter={"userId": userId, "instanceId": instanceId}) - records.sort(key=lambda r: r.get("awardedAt") or "", reverse=True) + records.sort(key=lambda r: r.get("awardedAt") or 0, reverse=True) return records def hasBadge(self, userId: str, instanceId: str, badgeKey: str) -> bool: @@ -291,7 +291,7 @@ class CommcoachObjects: def awardBadge(self, data: Dict[str, Any]) -> Dict[str, Any]: from .datamodelCommcoach import CoachingBadge - data["awardedAt"] = getIsoTimestamp() + data["awardedAt"] = getUtcTimestamp() data["createdAt"] = getIsoTimestamp() return self.db.recordCreate(CoachingBadge, data) diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index bb83c13c..c308684a 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -471,10 +471,10 @@ async def cancelSession( raise HTTPException(status_code=404, detail=routeApiMsg("Session not found")) _validateOwnership(session, context) - from modules.shared.timeUtils import getIsoTimestamp + from modules.shared.timeUtils import getUtcTimestamp interface.updateSession(sessionId, { "status": CoachingSessionStatus.CANCELLED.value, - "endedAt": getIsoTimestamp(), + "endedAt": getUtcTimestamp(), }) return {"cancelled": True} @@ -768,8 +768,8 @@ async def updateTaskStatus( updates = {"status": body.status.value} if body.status == CoachingTaskStatus.DONE: - from modules.shared.timeUtils import getIsoTimestamp - updates["completedAt"] = getIsoTimestamp() + from modules.shared.timeUtils import getUtcTimestamp + updates["completedAt"] = getUtcTimestamp() updated = interface.updateTask(taskId, updates) return {"task": updated} diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index 8765e30c..4ebe84ff 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ b/modules/features/commcoach/serviceCommcoach.py @@ -14,7 +14,7 @@ from typing import Optional, Dict, Any, List from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum -from modules.shared.timeUtils import getIsoTimestamp +from modules.shared.timeUtils import getIsoTimestamp, getUtcTimestamp from .datamodelCommcoach import ( CoachingMessage, CoachingMessageRole, CoachingMessageContentType, @@ -1107,7 +1107,7 @@ class CommcoachService: if len(messages) < 2: interface.updateSession(sessionId, { "status": CoachingSessionStatus.COMPLETED.value, - "endedAt": getIsoTimestamp(), + "endedAt": getUtcTimestamp(), "compressedHistorySummary": None, "compressedHistoryUpToMessageCount": None, }) @@ -1252,21 +1252,18 @@ class CommcoachService: logger.warning(f"Coaching session indexing failed (non-blocking): {e}") # Calculate duration - startedAt = session.get("startedAt", "") + startedAt = session.get("startedAt") durationSeconds = 0 if startedAt: - try: - from datetime import datetime - start = datetime.fromisoformat(startedAt.replace("Z", "+00:00")) - end = datetime.now(start.tzinfo) if start.tzinfo else datetime.now() - durationSeconds = int((end - start).total_seconds()) - except Exception: - pass + from datetime import datetime, timezone + start = datetime.fromtimestamp(startedAt, tz=timezone.utc) + end = datetime.now(timezone.utc) + durationSeconds = int((end - start).total_seconds()) # Update session - clear compressed history so it never leaks into new sessions sessionUpdates = { "status": CoachingSessionStatus.COMPLETED.value, - "endedAt": getIsoTimestamp(), + "endedAt": getUtcTimestamp(), "summary": summary, "durationSeconds": durationSeconds, "messageCount": len(messages), @@ -1285,7 +1282,7 @@ class CommcoachService: completedCount = len([s for s in allSessions if s.get("status") == CoachingSessionStatus.COMPLETED.value]) interface.updateContext(contextId, { "sessionCount": completedCount, - "lastSessionAt": getIsoTimestamp(), + "lastSessionAt": getUtcTimestamp(), }) # Update user profile streak @@ -1324,26 +1321,23 @@ class CommcoachService: if not profile: profile = interface.getOrCreateProfile(self.userId, self.mandateId, self.instanceId) - from datetime import datetime, timedelta + from datetime import datetime, timezone lastSessionAt = profile.get("lastSessionAt") currentStreak = profile.get("streakDays", 0) longestStreak = profile.get("longestStreak", 0) totalSessions = profile.get("totalSessions", 0) - today = datetime.now().date() + today = datetime.now(timezone.utc).date() isConsecutive = False if lastSessionAt: - try: - lastDate = datetime.fromisoformat(lastSessionAt.replace("Z", "+00:00")).date() - diff = (today - lastDate).days - if diff == 1: - isConsecutive = True - elif diff == 0: - isConsecutive = True # Same day, maintain streak - except Exception: - pass + lastDate = datetime.fromtimestamp(lastSessionAt, tz=timezone.utc).date() + diff = (today - lastDate).days + if diff == 1: + isConsecutive = True + elif diff == 0: + isConsecutive = True newStreak = (currentStreak + 1) if isConsecutive else 1 newLongest = max(longestStreak, newStreak) @@ -1352,7 +1346,7 @@ class CommcoachService: "streakDays": newStreak, "longestStreak": newLongest, "totalSessions": totalSessions + 1, - "lastSessionAt": getIsoTimestamp(), + "lastSessionAt": getUtcTimestamp(), }) except Exception as e: logger.warning(f"Failed to update streak: {e}") @@ -1418,14 +1412,13 @@ class CommcoachService: completedSessions = [s for s in allSessions if s.get("status") == CoachingSessionStatus.COMPLETED.value] for s in completedSessions: - startedAt = s.get("startedAt") or s.get("createdAt") or "" + startedAt = s.get("startedAt") if startedAt: - try: - from datetime import datetime - dt = datetime.fromisoformat(str(startedAt).replace("Z", "+00:00")) - s["date"] = dt.strftime("%d.%m.%Y") - except Exception: - s["date"] = "" + from datetime import datetime, timezone + dt = datetime.fromtimestamp(startedAt, tz=timezone.utc) + s["date"] = dt.strftime("%d.%m.%Y") + else: + s["date"] = "" result = { "intent": intent, diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py index 8b916005..e3394125 100644 --- a/modules/features/commcoach/serviceCommcoachAi.py +++ b/modules/features/commcoach/serviceCommcoachAi.py @@ -206,14 +206,11 @@ Tool-Nutzung: if retrievedSession: dateStr = "" - startedAt = retrievedSession.get("startedAt") or retrievedSession.get("createdAt") + startedAt = retrievedSession.get("startedAt") if startedAt: - try: - from datetime import datetime - dt = datetime.fromisoformat(str(startedAt).replace("Z", "+00:00")) - dateStr = dt.strftime("%d.%m.%Y") - except Exception: - pass + from datetime import datetime, timezone + dt = datetime.fromtimestamp(startedAt, tz=timezone.utc) + dateStr = dt.strftime("%d.%m.%Y") prompt += f"\n\nVom Benutzer angefragte Session ({dateStr}):" prompt += f"\n{retrievedSession.get('summary', '')[:500]}" diff --git a/modules/features/commcoach/serviceCommcoachContextRetrieval.py b/modules/features/commcoach/serviceCommcoachContextRetrieval.py index f1ccb9a3..e841dec4 100644 --- a/modules/features/commcoach/serviceCommcoachContextRetrieval.py +++ b/modules/features/commcoach/serviceCommcoachContextRetrieval.py @@ -7,7 +7,7 @@ Intent detection, retrieval strategies, and context assembly for intelligent ses import re import logging -from datetime import datetime +from datetime import datetime, timezone from typing import Optional, Dict, Any, List, Tuple from enum import Enum @@ -106,18 +106,15 @@ def findSessionByDate( for s in sessions: if s.get("status") != "completed": continue - startedAt = s.get("startedAt") or s.get("endedAt") or s.get("createdAt") + startedAt = s.get("startedAt") or s.get("endedAt") if not startedAt: continue - try: - dt = datetime.fromisoformat(startedAt.replace("Z", "+00:00")) - sessionDate = dt.date() - diff = abs((sessionDate - targetDateOnly).days) - if bestDiff is None or diff < bestDiff: - bestDiff = diff - bestMatch = s - except Exception: - continue + dt = datetime.fromtimestamp(startedAt, tz=timezone.utc) + sessionDate = dt.date() + diff = abs((sessionDate - targetDateOnly).days) + if bestDiff is None or diff < bestDiff: + bestDiff = diff + bestMatch = s return bestMatch @@ -231,17 +228,14 @@ def buildSessionSummariesForPrompt( and s.get("summary") and s.get("id") != excludeSessionId ] - completed.sort(key=lambda x: x.get("startedAt") or x.get("createdAt") or "", reverse=True) + completed.sort(key=lambda x: x.get("startedAt") or 0, reverse=True) result = [] for s in completed[:limit]: - startedAt = s.get("startedAt") or s.get("createdAt") or "" + startedAt = s.get("startedAt") dateStr = "" if startedAt: - try: - dt = datetime.fromisoformat(startedAt.replace("Z", "+00:00")) - dateStr = dt.strftime("%d.%m.%Y") - except Exception: - pass + dt = datetime.fromtimestamp(startedAt, tz=timezone.utc) + dateStr = dt.strftime("%d.%m.%Y") result.append({ "summary": s.get("summary", ""), "date": dateStr, diff --git a/modules/features/commcoach/serviceCommcoachExport.py b/modules/features/commcoach/serviceCommcoachExport.py index 13786d3e..614a3fe6 100644 --- a/modules/features/commcoach/serviceCommcoachExport.py +++ b/modules/features/commcoach/serviceCommcoachExport.py @@ -8,7 +8,7 @@ Generates Markdown and PDF exports for dossiers and sessions. import logging import json from typing import Dict, Any, List, Optional -from datetime import datetime +from datetime import datetime, timezone logger = logging.getLogger(__name__) @@ -49,7 +49,7 @@ def buildDossierMarkdown(context: Dict[str, Any], sessions: List[Dict[str, Any]] lines.append(f"- {text}") completedSessions = [s for s in sessions if s.get("status") == "completed"] - completedSessions.sort(key=lambda s: s.get("startedAt") or s.get("createdAt") or "") + completedSessions.sort(key=lambda s: s.get("startedAt") or 0) if completedSessions: lines += ["", "## Sessions", ""] for i, s in enumerate(completedSessions, 1): @@ -227,14 +227,14 @@ def _mdToXml(text: str) -> str: -def _formatDate(isoStr: Optional[str]) -> str: - if not isoStr: - return datetime.now().strftime("%d.%m.%Y") - try: - dt = datetime.fromisoformat(str(isoStr).replace("Z", "+00:00")) +def _formatDate(val) -> str: + if not val: + return datetime.now(timezone.utc).strftime("%d.%m.%Y") + if isinstance(val, (int, float)): + dt = datetime.fromtimestamp(float(val), tz=timezone.utc) return dt.strftime("%d.%m.%Y") - except Exception: - return isoStr + dt = datetime.fromisoformat(str(val).replace("Z", "+00:00")) + return dt.strftime("%d.%m.%Y") def _parseJson(value, fallback): diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py index 63572649..05473fc7 100644 --- a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py @@ -68,9 +68,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "fk_label_field": "label", - "fk_model": "Mandate", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -80,9 +78,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "fk_label_field": "label", - "fk_model": "FeatureInstance", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) label: str = Field( @@ -112,7 +108,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Vorlagen-Quelle", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow", "labelField": "label"}, }, ) templateScope: Optional[str] = Field( @@ -133,7 +129,7 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Aktuelle Version", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoVersion"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoVersion", "labelField": "versionNumber"}, }, ) active: bool = Field( @@ -182,7 +178,7 @@ class AutoVersion(PowerOnModel): "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow", "labelField": "label"}, }, ) versionNumber: int = Field( @@ -208,7 +204,7 @@ class AutoVersion(PowerOnModel): publishedAt: Optional[float] = Field( default=None, description="Timestamp when version was published", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht am"}, ) publishedBy: Optional[str] = Field( default=None, @@ -218,9 +214,7 @@ class AutoVersion(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht von", - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) @@ -243,7 +237,7 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow", "labelField": "label"}, }, ) label: Optional[str] = Field( @@ -259,9 +253,7 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID", - "fk_label_field": "label", - "fk_model": "Mandate", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) ownerId: Optional[str] = Field( @@ -272,9 +264,7 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Auslöser", - "fk_model": "User", - "fk_label_field": "username", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) versionId: Optional[str] = Field( @@ -285,7 +275,7 @@ class AutoRun(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Versions-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoVersion"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoVersion", "labelField": "versionNumber"}, }, ) status: str = Field( @@ -301,12 +291,12 @@ class AutoRun(PowerOnModel): startedAt: Optional[float] = Field( default=None, description="Run start timestamp", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"}, ) completedAt: Optional[float] = Field( default=None, description="Run completion timestamp", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"}, ) nodeOutputs: Dict[str, Any] = Field( default_factory=dict, @@ -358,7 +348,7 @@ class AutoStepLog(PowerOnModel): "frontend_readonly": True, "frontend_required": True, "label": "Lauf-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoRun"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoRun", "labelField": "label"}, }, ) nodeId: str = Field( @@ -392,12 +382,12 @@ class AutoStepLog(PowerOnModel): startedAt: Optional[float] = Field( default=None, description="Step start timestamp", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"}, ) completedAt: Optional[float] = Field( default=None, description="Step completion timestamp", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"}, ) durationMs: Optional[int] = Field( default=None, @@ -434,7 +424,7 @@ class AutoTask(PowerOnModel): "frontend_readonly": True, "frontend_required": True, "label": "Lauf-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoRun"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoRun", "labelField": "label"}, }, ) workflowId: str = Field( @@ -444,7 +434,7 @@ class AutoTask(PowerOnModel): "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow"}, + "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow", "labelField": "label"}, }, ) nodeId: str = Field( @@ -468,7 +458,7 @@ class AutoTask(PowerOnModel): "frontend_readonly": False, "frontend_required": False, "label": "Zugewiesen an", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) status: str = Field( @@ -484,7 +474,7 @@ class AutoTask(PowerOnModel): expiresAt: Optional[float] = Field( default=None, description="Expiration timestamp for the task", - json_schema_extra={"frontend_type": "datetime", "frontend_required": False, "label": "Läuft ab am"}, + json_schema_extra={"frontend_type": "timestamp", "frontend_required": False, "label": "Läuft ab am"}, ) diff --git a/modules/features/neutralization/datamodelFeatureNeutralizer.py b/modules/features/neutralization/datamodelFeatureNeutralizer.py index cd9b67f8..d83820fa 100644 --- a/modules/features/neutralization/datamodelFeatureNeutralizer.py +++ b/modules/features/neutralization/datamodelFeatureNeutralizer.py @@ -32,7 +32,7 @@ class DataNeutraliserConfig(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -42,7 +42,7 @@ class DataNeutraliserConfig(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) userId: str = Field( @@ -52,7 +52,7 @@ class DataNeutraliserConfig(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) enabled: bool = Field( @@ -107,7 +107,7 @@ class DataNeutralizerAttributes(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -117,7 +117,7 @@ class DataNeutralizerAttributes(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) userId: str = Field( @@ -127,7 +127,7 @@ class DataNeutralizerAttributes(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) originalText: str = Field( @@ -142,7 +142,7 @@ class DataNeutralizerAttributes(BaseModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_management", "table": "FileItem"}, + "fk_target": {"db": "poweron_management", "table": "FileItem", "labelField": "fileName"}, }, ) patternType: str = Field( @@ -160,16 +160,16 @@ class DataNeutralizationSnapshot(BaseModel): ) mandateId: str = Field( description="Mandate scope", - json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate"}}, + json_schema_extra={"label": "Mandanten-ID", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}, ) featureInstanceId: str = Field( default="", description="Feature instance scope", - json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + json_schema_extra={"label": "Feature-Instanz-ID", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) userId: str = Field( description="User who triggered neutralization", - json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "User"}}, + json_schema_extra={"label": "Benutzer-ID", "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}}, ) sourceLabel: str = Field( description="Human label, e.g. 'Prompt', 'Kontext', 'Nachricht 3'", diff --git a/modules/features/realEstate/datamodelFeatureRealEstate.py b/modules/features/realEstate/datamodelFeatureRealEstate.py index 4f2ebcd3..5ae732fe 100644 --- a/modules/features/realEstate/datamodelFeatureRealEstate.py +++ b/modules/features/realEstate/datamodelFeatureRealEstate.py @@ -288,7 +288,7 @@ class Kanton(PowerOnModel): "frontend_type": "text", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_realestate", "table": "Land"}, + "fk_target": {"db": "poweron_realestate", "table": "Land", "labelField": "label"}, }, ) abk: Optional[str] = Field( @@ -348,7 +348,7 @@ class Gemeinde(BaseModel): "frontend_type": "text", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_realestate", "table": "Kanton"}, + "fk_target": {"db": "poweron_realestate", "table": "Kanton", "labelField": "label"}, }, ) plz: Optional[str] = Field( @@ -398,7 +398,7 @@ class Parzelle(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandats-ID", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -408,7 +408,7 @@ class Parzelle(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) @@ -472,7 +472,7 @@ class Parzelle(PowerOnModel): "frontend_type": "text", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_realestate", "table": "Gemeinde"}, + "fk_target": {"db": "poweron_realestate", "table": "Gemeinde", "labelField": "label"}, }, ) @@ -638,7 +638,7 @@ class Projekt(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Mandats-ID", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -648,7 +648,7 @@ class Projekt(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) label: str = Field( diff --git a/modules/features/realEstate/routeFeatureRealEstate.py b/modules/features/realEstate/routeFeatureRealEstate.py index a8da37b4..05f029e7 100644 --- a/modules/features/realEstate/routeFeatureRealEstate.py +++ b/modules/features/realEstate/routeFeatureRealEstate.py @@ -228,31 +228,27 @@ def get_projects( recordFilter = {"featureInstanceId": instanceId} if mode in ("filterValues", "ids"): - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels items = interface.getProjekte(recordFilter=recordFilter) itemDicts = [i.model_dump() if hasattr(i, 'model_dump') else i for i in items] if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") + enrichRowsWithFkLabels(itemDicts, Projekt) return handleFilterValuesInMemory(itemDicts, column, pagination) return handleIdsInMemory(itemDicts, pagination) items = interface.getProjekte(recordFilter=recordFilter) paginationParams = _parsePagination(pagination) if paginationParams: - if paginationParams.sort: - for sort_field in reversed(paginationParams.sort): - field_name = sort_field.field - direction = sort_field.direction.lower() - items.sort( - key=lambda x: getattr(x, field_name, None), - reverse=(direction == "desc") - ) - total_items = len(items) + from modules.routes.routeHelpers import applyFiltersAndSort + itemDicts = [i.model_dump() if hasattr(i, 'model_dump') else i for i in items] + filtered = applyFiltersAndSort(itemDicts, paginationParams) + total_items = len(filtered) total_pages = (total_items + paginationParams.pageSize - 1) // paginationParams.pageSize start_idx = (paginationParams.page - 1) * paginationParams.pageSize end_idx = start_idx + paginationParams.pageSize - paginated_items = items[start_idx:end_idx] + paginated_items = filtered[start_idx:end_idx] return PaginatedResponse( items=paginated_items, pagination=PaginationMetadata( @@ -373,31 +369,27 @@ def get_parcels( recordFilter = {"featureInstanceId": instanceId} if mode in ("filterValues", "ids"): - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels items = interface.getParzellen(recordFilter=recordFilter) itemDicts = [i.model_dump() if hasattr(i, 'model_dump') else i for i in items] if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") + enrichRowsWithFkLabels(itemDicts, Parzelle) return handleFilterValuesInMemory(itemDicts, column, pagination) return handleIdsInMemory(itemDicts, pagination) items = interface.getParzellen(recordFilter=recordFilter) paginationParams = _parsePagination(pagination) if paginationParams: - if paginationParams.sort: - for sort_field in reversed(paginationParams.sort): - field_name = sort_field.field - direction = sort_field.direction.lower() - items.sort( - key=lambda x: getattr(x, field_name, None), - reverse=(direction == "desc") - ) - total_items = len(items) + from modules.routes.routeHelpers import applyFiltersAndSort + itemDicts = [i.model_dump() if hasattr(i, 'model_dump') else i for i in items] + filtered = applyFiltersAndSort(itemDicts, paginationParams) + total_items = len(filtered) total_pages = (total_items + paginationParams.pageSize - 1) // paginationParams.pageSize start_idx = (paginationParams.page - 1) * paginationParams.pageSize end_idx = start_idx + paginationParams.pageSize - paginated_items = items[start_idx:end_idx] + paginated_items = filtered[start_idx:end_idx] return PaginatedResponse( items=paginated_items, pagination=PaginationMetadata( diff --git a/modules/features/redmine/datamodelRedmine.py b/modules/features/redmine/datamodelRedmine.py index b5e72cc3..61555826 100644 --- a/modules/features/redmine/datamodelRedmine.py +++ b/modules/features/redmine/datamodelRedmine.py @@ -75,7 +75,7 @@ class RedmineInstanceConfig(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) mandateId: Optional[str] = Field( @@ -86,7 +86,7 @@ class RedmineInstanceConfig(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) baseUrl: str = Field( @@ -195,7 +195,7 @@ class RedmineTicketMirror(PowerOnModel): featureInstanceId: str = Field( description="FK -> FeatureInstance.id", json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) mandateId: Optional[str] = Field( default=None, @@ -226,14 +226,14 @@ class RedmineTicketMirror(PowerOnModel): closedOnTs: Optional[float] = Field( default=None, description="Best-effort UTC epoch when the ticket transitioned to a closed status. Approximated as updatedOnTs for closed tickets at sync time; used by Stats to render the open-vs-total snapshot chart.", - json_schema_extra={"label": "closedOn (epoch)", "frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}, + json_schema_extra={"label": "closedOn (epoch)", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}, ) createdOn: Optional[str] = Field(default=None, json_schema_extra={"label": "Erstellt am (Redmine)", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) updatedOn: Optional[str] = Field(default=None, json_schema_extra={"label": "Geaendert am (Redmine)", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) createdOnTs: Optional[float] = Field(default=None, description="UTC epoch parsed from createdOn (for SQL filtering)", - json_schema_extra={"label": "createdOn (epoch)", "frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}) + json_schema_extra={"label": "createdOn (epoch)", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}) updatedOnTs: Optional[float] = Field(default=None, description="UTC epoch parsed from updatedOn (for SQL filtering)", - json_schema_extra={"label": "updatedOn (epoch)", "frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}) + json_schema_extra={"label": "updatedOn (epoch)", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "frontend_hidden": True}) customFields: Optional[List[Dict[str, Any]]] = Field( default=None, description="List of {id,name,value} as returned by Redmine; stored as JSON", @@ -270,7 +270,7 @@ class RedmineRelationMirror(PowerOnModel): featureInstanceId: str = Field( description="FK -> FeatureInstance.id", json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}, ) redmineRelationId: int = Field( description="Redmine relation id (unique per feature instance)", @@ -468,17 +468,17 @@ class RedmineSyncResultDto(BaseModel): ticketsUpserted: int = 0 relationsUpserted: int = 0 durationMs: int = 0 - lastSyncAt: float + lastSyncAt: float = Field(json_schema_extra={"frontend_type": "timestamp"}) error: Optional[str] = None class RedmineSyncStatusDto(BaseModel): instanceId: str - lastSyncAt: Optional[float] = None - lastFullSyncAt: Optional[float] = None + lastSyncAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) + lastFullSyncAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) lastSyncDurationMs: Optional[int] = None lastSyncTicketCount: Optional[int] = None - lastSyncErrorAt: Optional[float] = None + lastSyncErrorAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) lastSyncErrorMessage: Optional[str] = None mirroredTicketCount: int = 0 mirroredRelationCount: int = 0 @@ -513,11 +513,11 @@ class RedmineConfigDto(BaseModel): rootTrackerName: str = "Userstory" defaultPeriodValue: Optional[Dict[str, Any]] = None schemaCacheTtlSeconds: int = 24 * 60 * 60 - schemaCachedAt: Optional[float] = None + schemaCachedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) isActive: bool = True - lastConnectedAt: Optional[float] = None - lastSyncAt: Optional[float] = None - lastFullSyncAt: Optional[float] = None + lastConnectedAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) + lastSyncAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) + lastFullSyncAt: Optional[float] = Field(default=None, json_schema_extra={"frontend_type": "timestamp"}) lastSyncTicketCount: Optional[int] = None lastSyncErrorMessage: Optional[str] = None diff --git a/modules/features/teamsbot/datamodelTeamsbot.py b/modules/features/teamsbot/datamodelTeamsbot.py index 76c9fb83..f7d12fda 100644 --- a/modules/features/teamsbot/datamodelTeamsbot.py +++ b/modules/features/teamsbot/datamodelTeamsbot.py @@ -91,8 +91,8 @@ class TeamsbotSession(PowerOnModel): meetingLink: str = Field(description="Teams meeting join link") botName: str = Field(default="AI Assistant", description="Display name of the bot in the meeting") status: TeamsbotSessionStatus = Field(default=TeamsbotSessionStatus.PENDING, description="Current session status") - startedAt: Optional[str] = Field(default=None, description="ISO timestamp when session started") - endedAt: Optional[str] = Field(default=None, description="ISO timestamp when session ended") + startedAt: Optional[float] = Field(default=None, description="UTC unix timestamp when session started", json_schema_extra={"frontend_type": "timestamp"}) + endedAt: Optional[float] = Field(default=None, description="UTC unix timestamp when session ended", json_schema_extra={"frontend_type": "timestamp"}) startedByUserId: str = Field(description="User ID who started the session") bridgeSessionId: Optional[str] = Field(default=None, description="Session ID on the .NET Media Bridge") meetingChatId: Optional[str] = Field(default=None, description="Teams meeting chat ID for Graph API messages") @@ -109,7 +109,7 @@ class TeamsbotTranscript(PowerOnModel): sessionId: str = Field(description="Session ID (FK)") speaker: Optional[str] = Field(default=None, description="Speaker name or identifier") text: str = Field(description="Transcribed text") - timestamp: str = Field(description="ISO timestamp of the speech segment") + timestamp: float = Field(description="UTC unix timestamp of the speech segment", json_schema_extra={"frontend_type": "timestamp"}) confidence: float = Field(default=0.0, ge=0.0, le=1.0, description="STT confidence score") language: Optional[str] = Field(default=None, description="Detected language code (e.g., de-DE)") isFinal: bool = Field(default=True, description="Whether this is a final or interim result") @@ -128,7 +128,7 @@ class TeamsbotBotResponse(PowerOnModel): modelName: Optional[str] = Field(default=None, description="AI model used for this response") processingTime: float = Field(default=0.0, description="Processing time in seconds") priceCHF: float = Field(default=0.0, description="Cost of this AI call in CHF") - timestamp: Optional[str] = Field(default=None, description="ISO timestamp of the response") + timestamp: Optional[float] = Field(default=None, description="UTC unix timestamp of the response", json_schema_extra={"frontend_type": "timestamp"}) # ============================================================================ @@ -315,8 +315,8 @@ class TeamsbotDirectorPrompt(PowerOnModel): fileIds: List[str] = Field(default_factory=list, description="UDB-selected file/object IDs to attach as RAG context") status: TeamsbotDirectorPromptStatus = Field(default=TeamsbotDirectorPromptStatus.QUEUED, description="Lifecycle status") statusMessage: Optional[str] = Field(default=None, description="Optional error or status detail") - createdAt: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat(), description="ISO timestamp when created") - consumedAt: Optional[str] = Field(default=None, description="ISO timestamp when consumed (one-shot) or marked done") + createdAt: float = Field(default_factory=lambda: datetime.now(timezone.utc).timestamp(), description="UTC unix timestamp when created", json_schema_extra={"frontend_type": "timestamp"}) + consumedAt: Optional[float] = Field(default=None, description="UTC unix timestamp when consumed (one-shot) or marked done", json_schema_extra={"frontend_type": "timestamp"}) agentRunId: Optional[str] = Field(default=None, description="Reference to the agent run that processed this prompt") responseText: Optional[str] = Field(default=None, description="Final agent text delivered to the meeting") diff --git a/modules/features/teamsbot/interfaceFeatureTeamsbot.py b/modules/features/teamsbot/interfaceFeatureTeamsbot.py index 2408e4cb..a7dedd6e 100644 --- a/modules/features/teamsbot/interfaceFeatureTeamsbot.py +++ b/modules/features/teamsbot/interfaceFeatureTeamsbot.py @@ -87,7 +87,7 @@ class TeamsbotObjects: if not includeEnded: records = [r for r in records if r.get("status") != TeamsbotSessionStatus.ENDED.value] # Sort by startedAt descending - records.sort(key=lambda r: r.get("startedAt") or "", reverse=True) + records.sort(key=lambda r: r.get("startedAt") or 0, reverse=True) return records def getActiveSessions(self, instanceId: str) -> List[Dict[str, Any]]: @@ -133,7 +133,7 @@ class TeamsbotObjects: TeamsbotTranscript, recordFilter={"sessionId": sessionId}, ) - records.sort(key=lambda r: r.get("timestamp") or "") + records.sort(key=lambda r: r.get("timestamp") or 0) if offset: records = records[offset:] if limit: @@ -146,7 +146,7 @@ class TeamsbotObjects: TeamsbotTranscript, recordFilter={"sessionId": sessionId}, ) - records.sort(key=lambda r: r.get("timestamp") or "") + records.sort(key=lambda r: r.get("timestamp") or 0) return records[-count:] def createTranscript(self, transcriptData: Dict[str, Any]) -> Dict[str, Any]: @@ -176,7 +176,7 @@ class TeamsbotObjects: TeamsbotBotResponse, recordFilter={"sessionId": sessionId}, ) - records.sort(key=lambda r: r.get("timestamp") or "") + records.sort(key=lambda r: r.get("timestamp") or 0) return records def createBotResponse(self, responseData: Dict[str, Any]) -> Dict[str, Any]: @@ -293,7 +293,7 @@ class TeamsbotObjects: if operatorUserId: recordFilter["operatorUserId"] = operatorUserId records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter=recordFilter) - records.sort(key=lambda r: r.get("createdAt") or "") + records.sort(key=lambda r: r.get("createdAt") or 0) return records def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]: @@ -310,7 +310,7 @@ class TeamsbotObjects: TeamsbotDirectorPromptStatus.FAILED.value, } active = [r for r in records if r.get("status") not in terminal] - active.sort(key=lambda r: r.get("createdAt") or "") + active.sort(key=lambda r: r.get("createdAt") or 0) return active def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py index 6d9df074..1d3939ac 100644 --- a/modules/features/teamsbot/service.py +++ b/modules/features/teamsbot/service.py @@ -11,13 +11,14 @@ import re import asyncio import time import base64 +from datetime import datetime, timezone from typing import Optional, Dict, Any, List, Callable from fastapi import WebSocket from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum -from modules.shared.timeUtils import getUtcTimestamp, getIsoTimestamp +from modules.shared.timeUtils import getUtcTimestamp from modules.serviceCenter import getService as _getServiceCenterService from modules.serviceCenter.context import ServiceCenterContext @@ -554,7 +555,7 @@ async def _emitSessionEvent(sessionId: str, eventType: str, data: Any): Creates the queue on-demand so events are never silently dropped.""" if sessionId not in sessionEvents: sessionEvents[sessionId] = asyncio.Queue() - await sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getIsoTimestamp()}) + await sessionEvents[sessionId].put({"type": eventType, "data": data, "timestamp": getUtcTimestamp()}) def _normalizeGatewayHostForBotWs(host: str) -> str: @@ -780,7 +781,7 @@ class TeamsbotService: interface.updateSession(sessionId, { "status": TeamsbotSessionStatus.ENDED.value, - "endedAt": getIsoTimestamp(), + "endedAt": getUtcTimestamp(), }) await _emitSessionEvent(sessionId, "statusChange", {"status": "ended"}) @@ -794,7 +795,7 @@ class TeamsbotService: interface.updateSession(sessionId, { "status": TeamsbotSessionStatus.ERROR.value, "errorMessage": str(e), - "endedAt": getIsoTimestamp(), + "endedAt": getUtcTimestamp(), }) # Cleanup event queue @@ -855,7 +856,7 @@ class TeamsbotService: try: await _emitSessionEvent(sessionId, "botConnectionState", { "connected": True, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) except Exception: pass @@ -1029,7 +1030,7 @@ class TeamsbotService: "status": f"playback_{status}", "hasWebSocket": True, "message": ackMessage, - "timestamp": playback.get("timestamp") or getIsoTimestamp(), + "timestamp": playback.get("timestamp") or getUtcTimestamp(), "format": playback.get("format"), "bytesBase64": playback.get("bytesBase64"), }) @@ -1045,7 +1046,7 @@ class TeamsbotService: "mfaType": mfaType, "displayNumber": displayNumber, "prompt": prompt, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) from .routeFeatureTeamsbot import mfaCodeQueues, mfaWaitTasks @@ -1094,7 +1095,7 @@ class TeamsbotService: "reason": reason, "message": errorData.get("message", "Chat message could not be sent"), "text": failedText, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) elif msgType == "mfaResolved": @@ -1107,7 +1108,7 @@ class TeamsbotService: mfaCodeQueues.pop(sessionId, None) await _emitSessionEvent(sessionId, "mfaResolved", { "success": success, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) except Exception as e: @@ -1122,7 +1123,7 @@ class TeamsbotService: try: await _emitSessionEvent(sessionId, "botConnectionState", { "connected": False, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) except Exception: pass @@ -1156,9 +1157,9 @@ class TeamsbotService: if errorMessage: updates["errorMessage"] = errorMessage if dbStatus == TeamsbotSessionStatus.ACTIVE.value: - updates["startedAt"] = getIsoTimestamp() + updates["startedAt"] = getUtcTimestamp() elif dbStatus in [TeamsbotSessionStatus.ENDED.value, TeamsbotSessionStatus.ERROR.value]: - updates["endedAt"] = getIsoTimestamp() + updates["endedAt"] = getUtcTimestamp() interface.updateSession(sessionId, updates) await _emitSessionEvent(sessionId, "statusChange", {"status": status, "errorMessage": errorMessage}) @@ -1350,7 +1351,7 @@ class TeamsbotService: sessionId=sessionId, speaker=speaker, text=text, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=self.config.language, isFinal=True, @@ -1363,7 +1364,7 @@ class TeamsbotService: "speaker": speaker, "text": text, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": False, "source": "chatHistory", "isHistory": True, @@ -1407,7 +1408,7 @@ class TeamsbotService: sessionId=sessionId, speaker=speaker, text=text, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=self.config.language, isFinal=isFinal, @@ -1450,7 +1451,7 @@ class TeamsbotService: "speaker": speaker, "text": displayText, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": isMerge, "source": source, "speakerResolvedFromHint": ( @@ -1690,7 +1691,7 @@ class TeamsbotService: await _emitSessionEvent(sessionId, "speechCancelled", { "reason": reason, "generation": gen, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) except Exception: pass @@ -2079,7 +2080,7 @@ class TeamsbotService: try: await _emitSessionEvent(sessionId, "quickAck", { "text": ackText, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) cancelHook = self._makeAnswerCancelHook() async with self._meetingTtsLock: @@ -2387,7 +2388,7 @@ class TeamsbotService: "status": "requested", "hasWebSocket": websocket is not None, "message": "TTS generation requested", - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) logger.info( f"Session {sessionId}: TTS requested (websocket_available={websocket is not None})" @@ -2400,7 +2401,7 @@ class TeamsbotService: "status": "unavailable", "hasWebSocket": False, "message": "TTS skipped — bot websocket unavailable", - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) if not sendChat: sendChat = True @@ -2428,7 +2429,7 @@ class TeamsbotService: "hasWebSocket": True, "chunks": ttsOutcome.get("chunks"), "played": ttsOutcome.get("played"), - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) else: logger.warning( @@ -2440,7 +2441,7 @@ class TeamsbotService: "chunks": ttsOutcome.get("chunks"), "played": ttsOutcome.get("played"), "message": ttsOutcome.get("error"), - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) if not sendChat: sendChat = True # Fallback to chat if voice-only and TTS failed @@ -2469,7 +2470,7 @@ class TeamsbotService: modelName=response.modelName, processingTime=response.processingTime, priceCHF=response.priceCHF, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), ).model_dump() createdResponse = interface.createBotResponse(botResponseData) @@ -2501,7 +2502,7 @@ class TeamsbotService: sessionId=sessionId, speaker=self.config.botName, text=storedText, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=self.config.language, isFinal=True, @@ -2520,7 +2521,7 @@ class TeamsbotService: "speaker": self.config.botName, "text": storedText, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": False, "source": "botResponse", "speakerResolvedFromHint": False, @@ -2557,7 +2558,7 @@ class TeamsbotService: modelName=response.modelName, processingTime=response.processingTime, priceCHF=response.priceCHF, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), ).model_dump() createdResponse = interface.createBotResponse(botResponseData) await _emitSessionEvent(sessionId, "botResponse", { @@ -2707,7 +2708,7 @@ class TeamsbotService: sessionId=sessionId, speaker=self.config.botName, text=chatText, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=self.config.language, isFinal=True, @@ -2732,7 +2733,7 @@ class TeamsbotService: "speaker": self.config.botName, "text": chatText, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": False, "source": "chat", "speakerResolvedFromHint": False, @@ -2749,13 +2750,15 @@ class TeamsbotService: from . import interfaceFeatureTeamsbot as interfaceDb interface = interfaceDb.getInterface(self.currentUser, self.mandateId, self.instanceId) transcripts = interface.getTranscripts(sessionId) - fromDt = params.get("fromdatetime") or params.get("fromDateTime") - toDt = params.get("todatetime") or params.get("toDateTime") + fromDtRaw = params.get("fromdatetime") or params.get("fromDateTime") + toDtRaw = params.get("todatetime") or params.get("toDateTime") + fromTs = datetime.fromisoformat(fromDtRaw).replace(tzinfo=timezone.utc).timestamp() if fromDtRaw else None + toTs = datetime.fromisoformat(toDtRaw).replace(tzinfo=timezone.utc).timestamp() if toDtRaw else None chatOnly = [t for t in transcripts if t.get("source") in ("chat", "chatHistory")] - if fromDt: - chatOnly = [t for t in chatOnly if (t.get("timestamp") or "") >= fromDt] - if toDt: - chatOnly = [t for t in chatOnly if (t.get("timestamp") or "") <= toDt] + if fromTs is not None: + chatOnly = [t for t in chatOnly if (t.get("timestamp") or 0) >= fromTs] + if toTs is not None: + chatOnly = [t for t in chatOnly if (t.get("timestamp") or 0) <= toTs] summary = "\n".join(f"[{t.get('speaker', '?')}]: {t.get('text', '')}" for t in chatOnly[-20:]) if not summary: summary = "Keine Chat-Nachrichten im angegebenen Zeitraum." @@ -3002,7 +3005,7 @@ class TeamsbotService: "text": (prompt.get("text") or "").strip(), "fileIds": list(prompt.get("fileIds") or []), "note": (internalNote or meetingText or "").strip(), - "recordedAt": getIsoTimestamp(), + "recordedAt": getUtcTimestamp(), }) if len(self._recentDirectorBriefings) > _RECENT_DIRECTOR_BRIEFINGS_MAX: self._recentDirectorBriefings = self._recentDirectorBriefings[ @@ -3066,7 +3069,7 @@ class TeamsbotService: return False interface.updateDirectorPrompt(promptId, { "status": TeamsbotDirectorPromptStatus.CONSUMED.value, - "consumedAt": getIsoTimestamp(), + "consumedAt": getUtcTimestamp(), "statusMessage": "Removed by operator", }) self._activePersistentPrompts = [ @@ -3187,7 +3190,7 @@ class TeamsbotService: } if not isPersistent: updates["status"] = TeamsbotDirectorPromptStatus.CONSUMED.value - updates["consumedAt"] = getIsoTimestamp() + updates["consumedAt"] = getUtcTimestamp() interface.updateDirectorPrompt(promptId, updates) await _emitSessionEvent(sessionId, "directorPrompt", { "id": promptId, @@ -3300,7 +3303,7 @@ class TeamsbotService: await _emitSessionEvent(sessionId, "agentRun", { "status": "interimNotice", "message": text, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) async def _runAgentForMeeting( @@ -3352,7 +3355,7 @@ class TeamsbotService: "source": sourceLabel, "promptId": promptId, "status": "started", - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) # Director prompts run silently by default — no spontaneous "moment please" @@ -3577,7 +3580,7 @@ class TeamsbotService: "chunks": ttsOutcome.get("chunks"), "played": ttsOutcome.get("played"), "error": ttsOutcome.get("error"), - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) if not ttsOutcome.get("success"): logger.warning( @@ -3615,7 +3618,7 @@ class TeamsbotService: modelName="agent", processingTime=0.0, priceCHF=0.0, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), ).model_dump() createdResponse = interface.createBotResponse(botResponseData) @@ -3635,7 +3638,7 @@ class TeamsbotService: sessionId=sessionId, speaker=self.config.botName, text=text, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=self.config.language, isFinal=True, @@ -3661,7 +3664,7 @@ class TeamsbotService: "speaker": self.config.botName, "text": text, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": False, "source": "botResponse", "speakerResolvedFromHint": False, @@ -3710,7 +3713,7 @@ class TeamsbotService: modelName="agent", processingTime=0.0, priceCHF=0.0, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), ).model_dump() createdResponse = interface.createBotResponse(botResponseData) @@ -3828,7 +3831,7 @@ class TeamsbotService: "status": "requested", "hasWebSocket": True, "message": "Greeting TTS requested", - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) cancelHook = self._makeAnswerCancelHook() async with self._meetingTtsLock: @@ -3851,7 +3854,7 @@ class TeamsbotService: "hasWebSocket": True, "chunks": ttsOutcome.get("chunks"), "played": ttsOutcome.get("played"), - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) else: logger.warning( @@ -3861,7 +3864,7 @@ class TeamsbotService: "status": "failed", "hasWebSocket": True, "message": ttsOutcome.get("error"), - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) if sendToChat: @@ -3881,7 +3884,7 @@ class TeamsbotService: sessionId=sessionId, speaker=self.config.botName, text=greetingText, - timestamp=getIsoTimestamp(), + timestamp=getUtcTimestamp(), confidence=1.0, language=greetingLang, isFinal=True, @@ -3905,14 +3908,14 @@ class TeamsbotService: "responseType": TeamsbotResponseType.AUDIO.value, "detectedIntent": "greeting", "reasoning": "Automatic join greeting", - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), }) await _emitSessionEvent(sessionId, "transcript", { "id": greetingTranscript.get("id"), "speaker": self.config.botName, "text": greetingText, "confidence": 1.0, - "timestamp": getIsoTimestamp(), + "timestamp": getUtcTimestamp(), "isContinuation": False, "source": "botResponse", "speakerResolvedFromHint": False, diff --git a/modules/features/trustee/accounting/accountingBridge.py b/modules/features/trustee/accounting/accountingBridge.py index 2a267b73..fec36d2d 100644 --- a/modules/features/trustee/accounting/accountingBridge.py +++ b/modules/features/trustee/accounting/accountingBridge.py @@ -8,6 +8,7 @@ Encapsulates: config loading -> connector resolution -> duplicate check -> push import json import logging import time +from datetime import datetime as _dt, timezone as _tz from typing import List, Dict, Any, Optional from .accountingConnectorBase import ( @@ -103,9 +104,12 @@ class AccountingBridge: costCenter=position.get("costCenter"), )) + valutaTs = position.get("valuta") + bookingDateStr = _dt.fromtimestamp(valutaTs, tz=_tz.utc).strftime("%Y-%m-%d") if valutaTs else "" + return AccountingBooking( reference=position.get("bookingReference") or position.get("id", ""), - bookingDate=position.get("valuta") or "", + bookingDate=bookingDateStr, description=position.get("desc", ""), lines=lines, ) diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py index 0770ead5..5827dd11 100644 --- a/modules/features/trustee/accounting/accountingDataSync.py +++ b/modules/features/trustee/accounting/accountingDataSync.py @@ -21,6 +21,7 @@ import logging import os import time from collections import defaultdict +from datetime import datetime as _dt, timezone as _tz from pathlib import Path from typing import Callable, Dict, Any, List, Optional, Type @@ -33,6 +34,23 @@ logger = logging.getLogger(__name__) _HEARTBEAT_EVERY = 500 +def _isoDateToTimestamp(raw: Any) -> Optional[float]: + """Convert an ISO date string (``YYYY-MM-DD`` or datetime) to a UTC + midnight unix timestamp. Returns ``None`` only when *raw* is + falsy/None. Raises ``ValueError`` for non-empty but unparseable + values so import errors are never silently swallowed. + """ + if raw is None or raw == "": + return None + s = str(raw).split("T")[0].strip()[:10] + if not s: + return None + try: + return _dt.strptime(s, "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + except ValueError: + raise ValueError(f"Cannot parse bookingDate '{raw}' as YYYY-MM-DD") + + def _isIncomeStatementAccount(accountNumber: str) -> bool: """Swiss KMU-Kontenrahmen heuristic: 1xxx + 2xxx -> balance sheet (cumulative carry-over across years); 3xxx..9xxx -> income statement @@ -360,8 +378,8 @@ class AccountingDataSync: logger.exception(f"AccountingDataSync: failed to write core lastSync* fields for cfg {cfgId}: {coreErr}") summary["errors"].append(f"Persist lastSync core: {coreErr}") extPayload = { - "lastSyncDateFrom": dateFrom, - "lastSyncDateTo": dateTo, + "lastSyncDateFrom": _isoDateToTimestamp(dateFrom), + "lastSyncDateTo": _isoDateToTimestamp(dateTo), "lastSyncCounts": { "accounts": int(summary.get("accounts", 0)), "journalEntries": int(summary.get("journalEntries", 0)), @@ -432,18 +450,19 @@ class AccountingDataSync: newestDate: Optional[str] = None for raw in rawEntries: entryId = str(_uuid.uuid4()) - bookingDate = raw.get("bookingDate") - if bookingDate: - normalized = str(bookingDate).split("T")[0][:10] - if normalized: - if oldestDate is None or normalized < oldestDate: - oldestDate = normalized - if newestDate is None or normalized > newestDate: - newestDate = normalized + rawDate = raw.get("bookingDate") + bookingTs = _isoDateToTimestamp(rawDate) + if rawDate: + isoDay = str(rawDate).split("T")[0][:10] + if isoDay: + if oldestDate is None or isoDay < oldestDate: + oldestDate = isoDay + if newestDate is None or isoDay > newestDate: + newestDate = isoDay entryRows.append({ "id": entryId, "externalId": raw.get("externalId"), - "bookingDate": bookingDate, + "bookingDate": bookingTs, "reference": raw.get("reference"), "description": raw.get("description", ""), "currency": raw.get("currency", "CHF"), @@ -501,17 +520,14 @@ class AccountingDataSync: """Persist account balances per (account, period) into ``TrusteeDataAccountBalance``. Source of truth (``source="connector"``): the list returned by - ``BaseAccountingConnector.getAccountBalances`` is persisted 1:1. + ``BaseAccountingConnector.getAccountBalances`` is persisted with + ``openingBalance``/``closingBalance`` from the connector. If the + connector doesn't supply ``debitTotal``/``creditTotal`` (e.g. RMA's + ``/gl/saldo`` only returns net balance), those fields are enriched + from the already-imported journal lines. Fallback (``source="local-fallback"``): aggregate the just-persisted - journal lines into **cumulative** balances. Unlike the previous - implementation, this version (a) carries the cumulative balance - forward across months/years for balance-sheet accounts, (b) resets - income-statement accounts at fiscal-year start, and (c) computes - ``openingBalance`` correctly as the previous period's - ``closingBalance``. ``openingBalance`` of the very first imported - period stays at 0 (no prior data available -- by design; see plan - document for rationale). + journal lines into **cumulative** balances. """ t0 = time.time() self._bulkClear(modelBalance, featureInstanceId) @@ -519,6 +535,9 @@ class AccountingDataSync: if connectorBalances: rows = [_balanceModelToRow(b, scope) for b in connectorBalances] + movements = self._aggregateJournalMovements(featureInstanceId, modelEntry, modelLine) + if movements: + self._enrichRowsWithMovements(rows, movements) n = self._bulkCreate(modelBalance, rows) logger.info( f"Persisted {n} balances for {featureInstanceId} in {time.time() - t0:.1f}s " @@ -534,19 +553,19 @@ class AccountingDataSync: ) return n - def _buildLocalBalanceFallback( + def _aggregateJournalMovements( self, featureInstanceId: str, modelEntry: Type, modelLine: Type, - scope: Dict[str, Any], - ) -> List[Dict[str, Any]]: - """Aggregate ``TrusteeDataJournalLine`` rows into cumulative period balances. + ) -> Dict[tuple, Dict[str, float]]: + """Aggregate debit/credit movements per ``(accountNumber, year, month)`` + from the already-persisted journal lines. - Returns rows ready for ``_bulkCreate``. Walks every account - chronologically through all years observed in the journal so the - cumulative balance and per-period opening are exact (within the - bounds of the imported window). + Returns ``{(accNo, year, month): {"debit": float, "credit": float}}``. + Used by both the local-fallback balance builder and the connector-balance + enrichment (RMA's ``/gl/saldo`` delivers net balance but no debit/credit + breakdown). """ entries = self._if.db.getRecordset( modelEntry, recordFilter={"featureInstanceId": featureInstanceId}, @@ -563,8 +582,6 @@ class AccountingDataSync: ) or [] movements: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0}) - observedYears: set = set() - observedAccounts: set = set() for ln in lines: if isinstance(ln, dict): jeid = ln.get("journalEntryId", "") @@ -577,19 +594,71 @@ class AccountingDataSync: debit = float(getattr(ln, "debitAmount", 0)) credit = float(getattr(ln, "creditAmount", 0)) - bdate = entryDates.get(jeid, "") + bdate = entryDates.get(jeid) if not accNo or not bdate: continue - parts = str(bdate).split("-") - if len(parts) < 2: - continue try: - year = int(parts[0]) - month = int(parts[1]) - except ValueError: + dt = _dt.fromtimestamp(float(bdate), tz=_tz.utc) + year = dt.year + month = dt.month + except (ValueError, TypeError, OSError): continue movements[(accNo, year, month)]["debit"] += debit movements[(accNo, year, month)]["credit"] += credit + return movements + + @staticmethod + def _enrichRowsWithMovements( + rows: List[Dict[str, Any]], + movements: Dict[tuple, Dict[str, float]], + ) -> None: + """Patch ``debitTotal`` / ``creditTotal`` on balance rows from journal movements. + + For monthly rows: use the exact month's movement. + For annual rows (``periodMonth=0``): sum all 12 months of that year+account. + Only overwrites if the existing value is 0 (connector didn't provide it). + """ + for row in rows: + if row.get("debitTotal", 0) != 0 or row.get("creditTotal", 0) != 0: + continue + accNo = row.get("accountNumber", "") + year = row.get("periodYear", 0) + month = row.get("periodMonth", 0) + if month > 0: + mov = movements.get((accNo, year, month)) + if mov: + row["debitTotal"] = round(mov["debit"], 2) + row["creditTotal"] = round(mov["credit"], 2) + else: + yearDebit = 0.0 + yearCredit = 0.0 + for m in range(1, 13): + mov = movements.get((accNo, year, m)) + if mov: + yearDebit += mov["debit"] + yearCredit += mov["credit"] + if yearDebit or yearCredit: + row["debitTotal"] = round(yearDebit, 2) + row["creditTotal"] = round(yearCredit, 2) + + def _buildLocalBalanceFallback( + self, + featureInstanceId: str, + modelEntry: Type, + modelLine: Type, + scope: Dict[str, Any], + ) -> List[Dict[str, Any]]: + """Aggregate ``TrusteeDataJournalLine`` rows into cumulative period balances. + + Returns rows ready for ``_bulkCreate``. Walks every account + chronologically through all years observed in the journal so the + cumulative balance and per-period opening are exact (within the + bounds of the imported window). + """ + movements = self._aggregateJournalMovements(featureInstanceId, modelEntry, modelLine) + observedYears: set = set() + observedAccounts: set = set() + for (accNo, year, month) in movements: observedYears.add(year) observedAccounts.add(accNo) diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py index a87f6f55..70e02c45 100644 --- a/modules/features/trustee/datamodelFeatureTrustee.py +++ b/modules/features/trustee/datamodelFeatureTrustee.py @@ -46,7 +46,7 @@ class TrusteeOrganisation(PowerOnModel): description="Mandate ID (system-level organisation)", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -57,7 +57,7 @@ class TrusteeOrganisation(PowerOnModel): description="Feature Instance ID for instance-level isolation", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -92,7 +92,7 @@ class TrusteeRole(PowerOnModel): description="Mandate ID", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -103,7 +103,7 @@ class TrusteeRole(PowerOnModel): description="Feature Instance ID for instance-level isolation", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -132,7 +132,7 @@ class TrusteeAccess(PowerOnModel): "frontend_readonly": False, "frontend_required": True, "frontend_options": "/api/trustee/{instanceId}/organisations/options", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeOrganisation"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeOrganisation", "labelField": "label"}, } ) roleId: str = Field( @@ -143,7 +143,7 @@ class TrusteeAccess(PowerOnModel): "frontend_readonly": False, "frontend_required": True, "frontend_options": "/api/trustee/{instanceId}/roles/options", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeRole"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeRole", "labelField": "desc"}, } ) userId: str = Field( @@ -154,7 +154,7 @@ class TrusteeAccess(PowerOnModel): "frontend_readonly": False, "frontend_required": True, "frontend_options": "/api/users/options", - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, } ) contractId: Optional[str] = Field( @@ -167,7 +167,7 @@ class TrusteeAccess(PowerOnModel): "frontend_required": False, "frontend_options": "/api/trustee/{instanceId}/contracts/options", "frontend_depends_on": "organisationId", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeContract"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeContract", "labelField": "label"}, } ) mandateId: Optional[str] = Field( @@ -175,7 +175,7 @@ class TrusteeAccess(PowerOnModel): description="Mandate ID", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -186,7 +186,7 @@ class TrusteeAccess(PowerOnModel): description="Feature Instance ID for instance-level isolation", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -215,7 +215,7 @@ class TrusteeContract(PowerOnModel): "frontend_readonly": False, # Editable at creation, then readonly "frontend_required": True, "frontend_options": "/api/trustee/{instanceId}/organisations/options", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeOrganisation"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeOrganisation", "labelField": "label"}, } ) label: str = Field( @@ -242,7 +242,7 @@ class TrusteeContract(PowerOnModel): description="Mandate ID", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -253,7 +253,7 @@ class TrusteeContract(PowerOnModel): description="Feature Instance ID for instance-level isolation", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False @@ -311,7 +311,7 @@ class TrusteeDocument(PowerOnModel): "frontend_type": "file_reference", "frontend_readonly": False, "frontend_required": False, - "fk_target": {"db": "poweron_management", "table": "FileItem"}, + "fk_target": {"db": "poweron_management", "table": "FileItem", "labelField": "fileName"}, } ) documentName: str = Field( @@ -359,7 +359,7 @@ class TrusteeDocument(PowerOnModel): description="Mandate ID (auto-set from context)", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, @@ -371,7 +371,7 @@ class TrusteeDocument(PowerOnModel): description="Feature Instance ID for instance-level isolation (auto-set from context)", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, @@ -439,7 +439,7 @@ class TrusteePosition(PowerOnModel): "frontend_readonly": False, "frontend_required": False, "frontend_options": "/api/trustee/{instanceId}/documents/options", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeDocument"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeDocument", "labelField": "documentName"}, } ) bankDocumentId: Optional[str] = Field( @@ -451,12 +451,12 @@ class TrusteePosition(PowerOnModel): "frontend_readonly": False, "frontend_required": False, "frontend_options": "/api/trustee/{instanceId}/documents/options", - "fk_target": {"db": "poweron_trustee", "table": "TrusteeDocument"}, + "fk_target": {"db": "poweron_trustee", "table": "TrusteeDocument", "labelField": "documentName"}, } ) - valuta: Optional[str] = Field( + valuta: Optional[float] = Field( default=None, - description="Value date (ISO format: YYYY-MM-DD)", + description="Value date (UTC midnight unix timestamp)", json_schema_extra={ "label": "Valutadatum", "frontend_type": "date", @@ -684,9 +684,9 @@ class TrusteePosition(PowerOnModel): "frontend_required": False } ) - dueDate: Optional[str] = Field( + dueDate: Optional[float] = Field( default=None, - description="Payment due date (ISO format: YYYY-MM-DD)", + description="Payment due date (UTC midnight unix timestamp)", json_schema_extra={ "label": "Fälligkeitsdatum", "frontend_type": "date", @@ -699,7 +699,7 @@ class TrusteePosition(PowerOnModel): description="Mandate ID (auto-set from context)", json_schema_extra={ "label": "Mandat", - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, @@ -711,7 +711,7 @@ class TrusteePosition(PowerOnModel): description="Feature Instance ID for instance-level isolation (auto-set from context)", json_schema_extra={ "label": "Feature-Instanz", - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, @@ -742,15 +742,15 @@ class TrusteeDataAccount(PowerOnModel): accountGroup: Optional[str] = Field(default=None, description="Account group/category", json_schema_extra={"label": "Gruppe"}) currency: str = Field(default="CHF", description="Account currency", json_schema_extra={"label": "Währung"}) isActive: bool = Field(default=True, json_schema_extra={"label": "Aktiv"}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) - featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) + featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) @i18nModel("Buchung (Sync)") class TrusteeDataJournalEntry(PowerOnModel): """Journal entry header synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"}) externalId: Optional[str] = Field(default=None, description="ID in the source system", json_schema_extra={"label": "Externe ID"}) - bookingDate: Optional[str] = Field(default=None, description="Booking date (YYYY-MM-DD)", json_schema_extra={"label": "Datum"}) + bookingDate: Optional[float] = Field(default=None, description="Booking date (UTC unix timestamp)", json_schema_extra={"label": "Datum", "frontend_type": "timestamp"}) reference: Optional[str] = Field(default=None, description="Booking reference / voucher number", json_schema_extra={"label": "Referenz"}) description: str = Field(default="", description="Booking text", json_schema_extra={"label": "Beschreibung"}) currency: str = Field(default="CHF", json_schema_extra={"label": "Währung"}) @@ -763,14 +763,14 @@ class TrusteeDataJournalEntry(PowerOnModel): "frontend_format": "R:#'###.00", }, ) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) - featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) + featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) @i18nModel("Buchungszeile (Sync)") class TrusteeDataJournalLine(PowerOnModel): """Journal entry line (debit/credit) synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"}) - journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id", json_schema_extra={"label": "Buchung", "fk_target": {"db": "poweron_trustee", "table": "TrusteeDataJournalEntry"}}) + journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id", json_schema_extra={"label": "Buchung", "fk_target": {"db": "poweron_trustee", "table": "TrusteeDataJournalEntry", "labelField": "reference"}}) accountNumber: str = Field(description="Account number", json_schema_extra={"label": "Konto"}) debitAmount: float = Field(default=0.0, json_schema_extra={"label": "Soll", "frontend_format": "R:#'###.00"}) creditAmount: float = Field(default=0.0, json_schema_extra={"label": "Haben", "frontend_format": "R:#'###.00"}) @@ -778,8 +778,8 @@ class TrusteeDataJournalLine(PowerOnModel): taxCode: Optional[str] = Field(default=None, json_schema_extra={"label": "Steuercode"}) costCenter: Optional[str] = Field(default=None, json_schema_extra={"label": "Kostenstelle"}) description: str = Field(default="", json_schema_extra={"label": "Beschreibung"}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) - featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) + featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) @i18nModel("Kontakt (Sync)") class TrusteeDataContact(PowerOnModel): @@ -796,8 +796,8 @@ class TrusteeDataContact(PowerOnModel): email: Optional[str] = Field(default=None, json_schema_extra={"label": "E-Mail"}) phone: Optional[str] = Field(default=None, json_schema_extra={"label": "Telefon"}) vatNumber: Optional[str] = Field(default=None, json_schema_extra={"label": "MWST-Nr."}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) - featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) + featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) @i18nModel("Kontosaldo (Sync)") class TrusteeDataAccountBalance(PowerOnModel): @@ -811,8 +811,8 @@ class TrusteeDataAccountBalance(PowerOnModel): creditTotal: float = Field(default=0.0, json_schema_extra={"label": "Haben-Umsatz", "frontend_format": "R:#'###.00"}) closingBalance: float = Field(default=0.0, json_schema_extra={"label": "Schlusssaldo", "frontend_format": "R:#'###.00"}) currency: str = Field(default="CHF", json_schema_extra={"label": "Währung"}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) - featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) + featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) @i18nModel("Buchhaltungs-Konfiguration") class TrusteeAccountingConfig(PowerOnModel): @@ -822,20 +822,20 @@ class TrusteeAccountingConfig(PowerOnModel): Credentials are stored encrypted (decrypted at runtime by the AccountingBridge). """ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"}) - featureInstanceId: str = Field(description="FK -> FeatureInstance.id (1:1)", json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + featureInstanceId: str = Field(description="FK -> FeatureInstance.id (1:1)", json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) connectorType: str = Field(description="Connector type key, e.g. 'rma', 'bexio', 'abacus'", json_schema_extra={"label": "System"}) displayLabel: str = Field(default="", description="User-visible label for this integration", json_schema_extra={"label": "Bezeichnung"}) encryptedConfig: str = Field(default="", description="Encrypted JSON blob with connector credentials", json_schema_extra={"label": "Verschlüsselte Konfiguration"}) isActive: bool = Field(default=True, json_schema_extra={"label": "Aktiv"}) - lastSyncAt: Optional[float] = Field(default=None, description="Timestamp of last sync attempt", json_schema_extra={"label": "Letzte Synchronisation"}) + lastSyncAt: Optional[float] = Field(default=None, description="Timestamp of last sync attempt", json_schema_extra={"label": "Letzte Synchronisation", "frontend_type": "timestamp"}) lastSyncStatus: Optional[str] = Field(default=None, description="Last sync result: success, error, partial", json_schema_extra={"label": "Status"}) lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"}) - lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"}) - lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"}) + lastSyncDateFrom: Optional[float] = Field(default=None, description="dateFrom (UTC midnight unix timestamp) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von", "frontend_type": "date"}) + lastSyncDateTo: Optional[float] = Field(default=None, description="dateTo (UTC midnight unix timestamp) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis", "frontend_type": "date"}) lastSyncCounts: Optional[Dict[str, Any]] = Field(default=None, description="Last import summary: per-entity counts (accounts, journalEntries, journalLines, contacts, accountBalances) plus oldestBookingDate / newestBookingDate (ISO YYYY-MM-DD) for completeness verification", json_schema_extra={"label": "Letzte Import-Zaehler"}) cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"}) - chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) + chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt", "frontend_type": "timestamp"}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) @i18nModel("Buchhaltungs-Synchronisation") class TrusteeAccountingSync(PowerOnModel): @@ -846,16 +846,16 @@ class TrusteeAccountingSync(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"}) positionId: str = Field( description="FK -> TrusteePosition.id", - json_schema_extra={"label": "Position", "fk_target": {"db": "poweron_trustee", "table": "TrusteePosition"}}, + json_schema_extra={"label": "Position", "fk_target": {"db": "poweron_trustee", "table": "TrusteePosition", "labelField": None}}, ) - featureInstanceId: str = Field(description="FK -> FeatureInstance.id", json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}}) + featureInstanceId: str = Field(description="FK -> FeatureInstance.id", json_schema_extra={"label": "Feature-Instanz", "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}}) connectorType: str = Field(description="Connector type at time of sync", json_schema_extra={"label": "System"}) externalId: Optional[str] = Field(default=None, description="ID assigned by the external system", json_schema_extra={"label": "Externe ID"}) externalReference: Optional[str] = Field(default=None, description="Reference in the external system", json_schema_extra={"label": "Externe Referenz"}) syncStatus: str = Field(default="pending", description="pending | synced | error | cancelled", json_schema_extra={"label": "Status"}) syncDirection: str = Field(default="push", description="push (local->ext) or pull (ext->local)", json_schema_extra={"label": "Richtung"}) - syncedAt: Optional[float] = Field(default=None, description="Timestamp of successful sync", json_schema_extra={"label": "Synchronisiert am"}) + syncedAt: Optional[float] = Field(default=None, description="Timestamp of successful sync", json_schema_extra={"label": "Synchronisiert am", "frontend_type": "timestamp"}) errorMessage: Optional[str] = Field(default=None, json_schema_extra={"label": "Fehler"}) bookingPayload: Optional[dict] = Field(default=None, description="Payload sent to the external system (audit)", json_schema_extra={"label": "Buchungs-Payload"}) - mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}}) + mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) diff --git a/modules/features/trustee/interfaceFeatureTrustee.py b/modules/features/trustee/interfaceFeatureTrustee.py index 9f1c911a..2f8aabf6 100644 --- a/modules/features/trustee/interfaceFeatureTrustee.py +++ b/modules/features/trustee/interfaceFeatureTrustee.py @@ -126,13 +126,11 @@ def _sanitisePositionPayload(data: Dict[str, Any]) -> Dict[str, Any]: """Failsafe normalisation for TrusteePosition payloads before DB writes.""" safeData = dict(data or {}) - isoValuta = _normaliseIsoDate(safeData.get("valuta")) - safeData["valuta"] = isoValuta + valutaTs = _normaliseTimestamp(safeData.get("valuta")) + safeData["valuta"] = valutaTs - safeData["transactionDateTime"] = _normaliseTimestamp( - safeData.get("transactionDateTime"), - fallbackIsoDate=isoValuta, - ) + txTs = _normaliseTimestamp(safeData.get("transactionDateTime")) + safeData["transactionDateTime"] = txTs if txTs is not None else valutaTs safeData["bookingAmount"] = _toSafeFloat(safeData.get("bookingAmount"), defaultValue=0.0) safeData["originalAmount"] = _toSafeFloat( @@ -148,7 +146,7 @@ def _sanitisePositionPayload(data: Dict[str, Any]) -> Dict[str, Any]: safeData["originalCurrency"] = str(originalCurrency).upper() if "dueDate" in safeData and safeData["dueDate"]: - safeData["dueDate"] = _normaliseIsoDate(safeData["dueDate"]) + safeData["dueDate"] = _normaliseTimestamp(safeData["dueDate"]) _VALID_DOC_TYPES = {"invoice", "expense_receipt", "bank_document", "contract", "unknown"} docType = safeData.get("documentType") diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py index 021251fc..ebef127c 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -393,9 +393,10 @@ def get_position_options( items = result.items if hasattr(result, 'items') else result def _makePositionLabel(p: TrusteePosition) -> str: + from datetime import datetime as _dt, timezone as _tz parts = [] if p.valuta: - parts.append(str(p.valuta)[:10]) # Datum ohne Zeit + parts.append(_dt.fromtimestamp(p.valuta, tz=_tz.utc).strftime("%Y-%m-%d")) if p.company: parts.append(p.company[:30]) if p.desc: @@ -978,33 +979,27 @@ def get_documents( def _handleDocumentMode(instanceId, mandateId, mode, column, pagination, context): """Handle mode=filterValues and mode=ids for trustee documents.""" - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleIdsInMemory if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - try: - from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - from modules.routes.routeHelpers import parseCrossFilterPagination - crossFilterPagination = parseCrossFilterPagination(column, pagination) - from fastapi.responses import JSONResponse - values = getDistinctColumnValuesWithRBAC( - connector=interface.db, - modelClass=TrusteeDocument, - column=column, - currentUser=interface.currentUser, - pagination=crossFilterPagination, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - except Exception: - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - result = interface.getAllDocuments(None) - items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] - return handleFilterValuesInMemory(items, column, pagination) + from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC + from modules.routes.routeHelpers import parseCrossFilterPagination + from fastapi.responses import JSONResponse + interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) + crossFilterPagination = parseCrossFilterPagination(column, pagination) + values = getDistinctColumnValuesWithRBAC( + connector=interface.db, + modelClass=TrusteeDocument, + column=column, + currentUser=interface.currentUser, + pagination=crossFilterPagination, + recordFilter=None, + mandateId=interface.mandateId, + featureInstanceId=interface.featureInstanceId, + featureCode=interface.FEATURE_CODE + ) + return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) if mode == "ids": interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllDocuments(None) @@ -1227,33 +1222,27 @@ def get_positions( def _handlePositionMode(instanceId, mandateId, mode, column, pagination, context): """Handle mode=filterValues and mode=ids for trustee positions.""" - from modules.routes.routeHelpers import handleFilterValuesInMemory, handleIdsInMemory + from modules.routes.routeHelpers import handleIdsInMemory if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - try: - from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - from modules.routes.routeHelpers import parseCrossFilterPagination - crossFilterPagination = parseCrossFilterPagination(column, pagination) - from fastapi.responses import JSONResponse - values = getDistinctColumnValuesWithRBAC( - connector=interface.db, - modelClass=TrusteePosition, - column=column, - currentUser=interface.currentUser, - pagination=crossFilterPagination, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - except Exception: - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - result = interface.getAllPositions(None) - items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] - return handleFilterValuesInMemory(items, column, pagination) + from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC + from modules.routes.routeHelpers import parseCrossFilterPagination + from fastapi.responses import JSONResponse + interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) + crossFilterPagination = parseCrossFilterPagination(column, pagination) + values = getDistinctColumnValuesWithRBAC( + connector=interface.db, + modelClass=TrusteePosition, + column=column, + currentUser=interface.currentUser, + pagination=crossFilterPagination, + recordFilter=None, + mandateId=interface.mandateId, + featureInstanceId=interface.featureInstanceId, + featureCode=interface.FEATURE_CODE + ) + return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) if mode == "ids": interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllPositions(None) @@ -2338,6 +2327,63 @@ def delete_instance_role_rule( # (Unified Filter API: mode=filterValues / mode=ids). +def _buildFeatureInternalResolvers(modelClass, db) -> Dict[str, Any]: + """Build ``extraResolvers`` for FK fields that point to other Trustee models. + + The builtin ``enrichRowsWithFkLabels`` only covers Mandate / FeatureInstance / + User / Role. Feature-internal FKs (e.g. ``journalEntryId`` -> ``TrusteeDataJournalEntry``) + need a resolver that queries the Trustee DB. This function discovers such fields + from the Pydantic model's ``fk_target`` annotations and creates a resolver per field. + + Label strategy per target model: + - ``TrusteeDataJournalEntry``: ``" | "`` + - Generic fallback: ``""`` or ``""`` + """ + resolvers: Dict[str, Any] = {} + for name, fieldInfo in modelClass.model_fields.items(): + extra = fieldInfo.json_schema_extra + if not extra or not isinstance(extra, dict): + continue + tgt = extra.get("fk_target") + if not isinstance(tgt, dict): + continue + tableName = tgt.get("table", "") + if tableName not in _TRUSTEE_ENTITY_MODELS: + continue + targetModel = _TRUSTEE_ENTITY_MODELS[tableName] + + def _makeResolver(model, field=name): + def _resolve(ids: List[str]) -> Dict[str, Optional[str]]: + result: Dict[str, Optional[str]] = {i: None for i in ids} + try: + recs = db.getRecordset(model, recordFilter={"id": list(set(ids))}) or [] + except Exception: + return result + for r in recs: + row = r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else {} + rid = row.get("id", "") + parts = [] + for col in ("externalId", "reference", "bookingDate", "label", "name", "accountNumber"): + val = row.get(col) + if val is not None and val != "": + if col == "bookingDate" and isinstance(val, (int, float)): + from datetime import datetime, timezone + try: + parts.append(datetime.fromtimestamp(val, tz=timezone.utc).strftime("%Y-%m-%d")) + except Exception: + parts.append(str(val)) + else: + parts.append(str(val)) + if len(parts) >= 2: + break + result[rid] = " | ".join(parts) if parts else rid[:8] + return result + return _resolve + + resolvers[name] = _makeResolver(targetModel) + return resolvers + + def _paginatedReadEndpoint( *, instanceId: str, @@ -2359,7 +2405,6 @@ def _paginatedReadEndpoint( getDistinctColumnValuesWithRBAC, ) from modules.routes.routeHelpers import ( - handleFilterValuesInMemory, handleIdsInMemory, parseCrossFilterPagination, enrichRowsWithFkLabels, @@ -2372,34 +2417,19 @@ def _paginatedReadEndpoint( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - try: - crossFilterPagination = parseCrossFilterPagination(column, pagination) - values = getDistinctColumnValuesWithRBAC( - connector=interface.db, - modelClass=modelClass, - column=column, - currentUser=interface.currentUser, - pagination=crossFilterPagination, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE, - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - except Exception: - result = getRecordsetPaginatedWithRBAC( - connector=interface.db, - modelClass=modelClass, - currentUser=interface.currentUser, - pagination=None, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE, - ) - items = result.items if hasattr(result, "items") else result - items = [r.model_dump() if hasattr(r, "model_dump") else r for r in items] - return handleFilterValuesInMemory(items, column, pagination) + crossFilterPagination = parseCrossFilterPagination(column, pagination) + values = getDistinctColumnValuesWithRBAC( + connector=interface.db, + modelClass=modelClass, + column=column, + currentUser=interface.currentUser, + pagination=crossFilterPagination, + recordFilter=None, + mandateId=interface.mandateId, + featureInstanceId=interface.featureInstanceId, + featureCode=interface.FEATURE_CODE, + ) + return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) if mode == "ids": result = getRecordsetPaginatedWithRBAC( @@ -2431,8 +2461,13 @@ def _paginatedReadEndpoint( def _itemsToDicts(rawItems): return [r.model_dump() if hasattr(r, "model_dump") else r for r in rawItems] + featureResolvers = _buildFeatureInternalResolvers(modelClass, interface.db) + if paginationParams and hasattr(result, "items"): - enriched = enrichRowsWithFkLabels(_itemsToDicts(result.items), modelClass) + enriched = enrichRowsWithFkLabels( + _itemsToDicts(result.items), modelClass, + extraResolvers=featureResolvers or None, + ) return { "items": enriched, "pagination": PaginationMetadata( @@ -2445,7 +2480,10 @@ def _paginatedReadEndpoint( ).model_dump(), } items = result.items if hasattr(result, "items") else result - enriched = enrichRowsWithFkLabels(_itemsToDicts(items), modelClass) + enriched = enrichRowsWithFkLabels( + _itemsToDicts(items), modelClass, + extraResolvers=featureResolvers or None, + ) return {"items": enriched, "pagination": None} diff --git a/modules/features/workspace/datamodelFeatureWorkspace.py b/modules/features/workspace/datamodelFeatureWorkspace.py index a6d3c2a4..b12d4b84 100644 --- a/modules/features/workspace/datamodelFeatureWorkspace.py +++ b/modules/features/workspace/datamodelFeatureWorkspace.py @@ -24,7 +24,7 @@ class WorkspaceUserSettings(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "User"}, + "fk_target": {"db": "poweron_app", "table": "UserInDB", "labelField": "username"}, }, ) mandateId: str = Field( @@ -34,7 +34,7 @@ class WorkspaceUserSettings(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "Mandate"}, + "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}, }, ) featureInstanceId: str = Field( @@ -44,7 +44,7 @@ class WorkspaceUserSettings(PowerOnModel): "frontend_type": "text", "frontend_readonly": True, "frontend_required": True, - "fk_target": {"db": "poweron_app", "table": "FeatureInstance"}, + "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"}, }, ) maxAgentRounds: Optional[int] = Field( diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index d1593473..d5803e4b 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1599,18 +1599,19 @@ class AppObjects: from datetime import datetime, timezone, timedelta now = datetime.now(timezone.utc) + nowTs = now.timestamp() targetStatus = SubscriptionStatusEnum.TRIALING if plan.trialDays else SubscriptionStatusEnum.ACTIVE subscription = MandateSubscription( mandateId=mandateId, planKey=planKey, status=targetStatus, - startedAt=now.isoformat(), - currentPeriodStart=now.isoformat(), + startedAt=nowTs, + currentPeriodStart=nowTs, ) if plan.trialDays: trialEnd = now + timedelta(days=plan.trialDays) - subscription.trialEndsAt = trialEnd.isoformat() - subscription.currentPeriodEnd = trialEnd.isoformat() + subscription.trialEndsAt = trialEnd.timestamp() + subscription.currentPeriodEnd = trialEnd.timestamp() subInterface = _getSubRoot() subInterface.createSubscription(subscription) @@ -1716,19 +1717,19 @@ class AppObjects: targetStatus = SubscriptionStatusEnum.TRIALING if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE additionalData = { - "currentPeriodStart": now.isoformat(), + "currentPeriodStart": now.timestamp(), } if plan and plan.trialDays: trialEnd = now + timedelta(days=plan.trialDays) - additionalData["trialEndsAt"] = trialEnd.isoformat() - additionalData["currentPeriodEnd"] = trialEnd.isoformat() + additionalData["trialEndsAt"] = trialEnd.timestamp() + additionalData["currentPeriodEnd"] = trialEnd.timestamp() elif plan and plan.billingPeriod: from modules.datamodels.datamodelSubscription import BillingPeriodEnum if plan.billingPeriod == BillingPeriodEnum.MONTHLY: - additionalData["currentPeriodEnd"] = (now + timedelta(days=30)).isoformat() + additionalData["currentPeriodEnd"] = (now + timedelta(days=30)).timestamp() elif plan.billingPeriod == BillingPeriodEnum.YEARLY: - additionalData["currentPeriodEnd"] = (now + timedelta(days=365)).isoformat() + additionalData["currentPeriodEnd"] = (now + timedelta(days=365)).timestamp() try: subInterface.transitionStatus( diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index db1ee619..fcb559aa 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -884,9 +884,10 @@ class BillingObjects: periodStartAt = periodStartAt.replace(tzinfo=timezone.utc) else: periodStartAt = periodStartAt.astimezone(timezone.utc) + periodStartTs = periodStartAt.timestamp() settings = self.getOrCreateSettings(mandateId) - prev = self._parseSettingsDateTime(settings.get("storagePeriodStartAt")) - if prev is not None and abs((prev - periodStartAt).total_seconds()) < 2: + prev = settings.get("storagePeriodStartAt") + if prev is not None and abs(prev - periodStartTs) < 2: return from modules.interfaces.interfaceDbSubscription import getRootInterface as _getSubRoot @@ -896,7 +897,7 @@ class BillingObjects: { "storageHighWatermarkMB": usedMB, "storageBilledUpToMB": 0.0, - "storagePeriodStartAt": periodStartAt, + "storagePeriodStartAt": periodStartTs, }, ) logger.info( @@ -1044,18 +1045,9 @@ class BillingObjects: if not periodStart or not periodEnd: return None - if isinstance(periodStart, str): - periodStart = datetime.fromisoformat(periodStart) - if isinstance(periodEnd, str): - periodEnd = datetime.fromisoformat(periodEnd) - if periodStart.tzinfo is None: - periodStart = periodStart.replace(tzinfo=timezone.utc) - if periodEnd.tzinfo is None: - periodEnd = periodEnd.replace(tzinfo=timezone.utc) - - now = datetime.now(timezone.utc) - totalSeconds = (periodEnd - periodStart).total_seconds() - remainingSeconds = max((periodEnd - now).total_seconds(), 0) + nowTs = datetime.now(timezone.utc).timestamp() + totalSeconds = periodEnd - periodStart + remainingSeconds = max(periodEnd - nowTs, 0) proRataFraction = remainingSeconds / totalSeconds if totalSeconds > 0 else 0 amount = round(abs(delta) * plan.budgetAiPerUserCHF * proRataFraction, 2) @@ -1488,7 +1480,7 @@ class BillingObjects: @staticmethod def _mapPaginationColumns(pagination: PaginationParams) -> PaginationParams: """Remap frontend column names to DB column names in filters and sort.""" - _COL_MAP = {"createdAt": "sysCreatedAt"} + _COL_MAP: dict = {} _ENRICHED_COLS = {"mandateName", "userName", "mandateId", "userId"} import copy p = copy.deepcopy(pagination) @@ -1974,7 +1966,6 @@ class BillingObjects: ) -> List[str]: """SQL DISTINCT for filter-values on BillingTransaction, scoped by mandates.""" _COLUMN_MAP = { - "createdAt": "sysCreatedAt", "mandateId": "accountId", "mandateName": "accountId", } diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py index a09fe93f..a39685fc 100644 --- a/modules/interfaces/interfaceDbSubscription.py +++ b/modules/interfaces/interfaceDbSubscription.py @@ -224,7 +224,7 @@ class SubscriptionObjects: updateData = {"status": toStatus.value} if toStatus in TERMINAL_STATUSES and not (additionalData or {}).get("endedAt"): - updateData["endedAt"] = datetime.now(timezone.utc).isoformat() + updateData["endedAt"] = datetime.now(timezone.utc).timestamp() if additionalData: updateData.update(additionalData) @@ -244,7 +244,7 @@ class SubscriptionObjects: result = self.db.recordModify(MandateSubscription, subscriptionId, { "status": SubscriptionStatusEnum.EXPIRED.value, - "endedAt": datetime.now(timezone.utc).isoformat(), + "endedAt": datetime.now(timezone.utc).timestamp(), }) logger.info("Force-expired subscription %s (was %s)", subscriptionId, currentStatus) return result diff --git a/modules/interfaces/interfaceRbac.py b/modules/interfaces/interfaceRbac.py index 13bdfcba..ad2ac6b5 100644 --- a/modules/interfaces/interfaceRbac.py +++ b/modules/interfaces/interfaceRbac.py @@ -25,6 +25,7 @@ GROUP-Berechtigung: import logging import json import math +import re from typing import List, Dict, Any, Optional, Type, Union from pydantic import BaseModel from modules.datamodels.datamodelRbac import AccessRuleContext @@ -35,6 +36,138 @@ from modules.security.rootAccess import getRootDbAppConnector logger = logging.getLogger(__name__) +_ISO_DATE_RE = re.compile(r"^\d{4}-\d{2}-\d{2}$") + + +def _rbacAppendPaginationDictFilter( + key: str, + val: Dict[str, Any], + colType: str, + whereConditions: List[str], + whereValues: List[Any], +) -> None: + """Append SQL for one pagination ``filters`` dict entry (operator + value). + + Mirrors ``connectorDbPostgre._buildPaginationClauses`` semantics so numeric + comparisons use ``::double precision`` instead of lexicographic ``::TEXT``. + """ + op = val.get("operator", "equals") + v = val.get("value", "") + isNumericCol = colType in ("INTEGER", "DOUBLE PRECISION") + + if op in ("equals", "eq"): + if colType == "BOOLEAN": + whereConditions.append(f'COALESCE("{key}", FALSE) = %s') + whereValues.append(str(v).lower() == "true") + elif isNumericCol: + try: + whereConditions.append(f'"{key}"::double precision = %s') + whereValues.append(float(v)) + except (ValueError, TypeError): + whereConditions.append(f'"{key}"::TEXT = %s') + whereValues.append(str(v)) + else: + whereConditions.append(f'"{key}"::TEXT = %s') + whereValues.append(str(v)) + return + + if op == "contains": + whereConditions.append(f'"{key}"::TEXT ILIKE %s') + whereValues.append(f"%{v}%") + return + if op == "startsWith": + whereConditions.append(f'"{key}"::TEXT ILIKE %s') + whereValues.append(f"{v}%") + return + if op == "endsWith": + whereConditions.append(f'"{key}"::TEXT ILIKE %s') + whereValues.append(f"%{v}") + return + + if op in ("gt", "gte", "lt", "lte"): + sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op] + if isNumericCol: + try: + whereConditions.append(f'"{key}"::double precision {sqlOp} %s') + whereValues.append(float(v)) + except (ValueError, TypeError): + whereConditions.append(f'"{key}"::TEXT {sqlOp} %s') + whereValues.append(str(v)) + else: + whereConditions.append(f'"{key}"::TEXT {sqlOp} %s') + whereValues.append(str(v)) + return + + if op == "between" and isinstance(v, dict): + fromVal = v.get("from", "") + toVal = v.get("to", "") + if not fromVal and not toVal: + return + isDateVal = bool(fromVal and _ISO_DATE_RE.match(str(fromVal))) or bool( + toVal and _ISO_DATE_RE.match(str(toVal)) + ) + if isNumericCol and isDateVal: + from datetime import datetime as _dt, timezone as _tz + if fromVal and toVal: + fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace( + hour=23, minute=59, second=59, tzinfo=_tz.utc + ).timestamp() + whereConditions.append(f'"{key}" >= %s AND "{key}" <= %s') + whereValues.extend([fromTs, toTs]) + elif fromVal: + fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + whereConditions.append(f'"{key}" >= %s') + whereValues.append(fromTs) + else: + toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace( + hour=23, minute=59, second=59, tzinfo=_tz.utc + ).timestamp() + whereConditions.append(f'"{key}" <= %s') + whereValues.append(toTs) + elif isNumericCol: + try: + if fromVal and toVal: + whereConditions.append( + f'"{key}"::double precision >= %s AND "{key}"::double precision <= %s' + ) + whereValues.extend([float(fromVal), float(toVal)]) + elif fromVal: + whereConditions.append(f'"{key}"::double precision >= %s') + whereValues.append(float(fromVal)) + elif toVal: + whereConditions.append(f'"{key}"::double precision <= %s') + whereValues.append(float(toVal)) + except (ValueError, TypeError): + pass + else: + if fromVal and toVal: + whereConditions.append(f'"{key}"::TEXT >= %s AND "{key}"::TEXT <= %s') + whereValues.extend([str(fromVal), str(toVal)]) + elif fromVal: + whereConditions.append(f'"{key}"::TEXT >= %s') + whereValues.append(str(fromVal)) + elif toVal: + whereConditions.append(f'"{key}"::TEXT <= %s') + whereValues.append(str(toVal)) + return + + if op == "in" and isinstance(v, list): + if not v: + whereConditions.append("1 = 0") + else: + whereConditions.append(f'"{key}"::TEXT = ANY(%s)') + whereValues.append([str(x) for x in v]) + return + if op == "notIn" and isinstance(v, list): + if v: + whereConditions.append(f'NOT ("{key}"::TEXT = ANY(%s))') + whereValues.append([str(x) for x in v]) + return + + whereConditions.append(f'"{key}"::TEXT ILIKE %s') + whereValues.append(str(v)) + # ============================================================================= # Namespace-Mapping für statische Tabellen @@ -401,36 +534,10 @@ def getRecordsetPaginatedWithRBAC( whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')') continue if isinstance(val, dict): - op = val.get("operator", "equals") - v = val.get("value", "") - if op in ("equals", "eq"): - whereConditions.append(f'"{key}"::TEXT = %s') - whereValues.append(str(v)) - elif op == "contains": - whereConditions.append(f'"{key}"::TEXT ILIKE %s') - whereValues.append(f"%{v}%") - elif op == "startsWith": - whereConditions.append(f'"{key}"::TEXT ILIKE %s') - whereValues.append(f"{v}%") - elif op == "endsWith": - whereConditions.append(f'"{key}"::TEXT ILIKE %s') - whereValues.append(f"%{v}") - elif op in ("gt", "gte", "lt", "lte"): - sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op] - whereConditions.append(f'"{key}"::TEXT {sqlOp} %s') - whereValues.append(str(v)) - elif op == "between": - fromVal = v.get("from", "") if isinstance(v, dict) else "" - toVal = v.get("to", "") if isinstance(v, dict) else "" - if fromVal and toVal: - whereConditions.append(f'"{key}"::TEXT >= %s AND "{key}"::TEXT <= %s') - whereValues.extend([str(fromVal), str(toVal)]) - elif fromVal: - whereConditions.append(f'"{key}"::TEXT >= %s') - whereValues.append(str(fromVal)) - elif toVal: - whereConditions.append(f'"{key}"::TEXT <= %s') - whereValues.append(str(toVal)) + colType = fields.get(key, "TEXT") + _rbacAppendPaginationDictFilter( + key, val, colType, whereConditions, whereValues + ) else: whereConditions.append(f'"{key}"::TEXT ILIKE %s') whereValues.append(str(val)) @@ -587,29 +694,10 @@ def getDistinctColumnValuesWithRBAC( whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')') continue if isinstance(val, dict): - op = val.get("operator", "equals") - v = val.get("value", "") - if op in ("equals", "eq"): - whereConditions.append(f'"{key}"::TEXT = %s') - whereValues.append(str(v)) - elif op == "contains": - whereConditions.append(f'"{key}"::TEXT ILIKE %s') - whereValues.append(f"%{v}%") - elif op == "between": - fromVal = v.get("from", "") if isinstance(v, dict) else "" - toVal = v.get("to", "") if isinstance(v, dict) else "" - if fromVal and toVal: - whereConditions.append(f'"{key}"::TEXT >= %s AND "{key}"::TEXT <= %s') - whereValues.extend([str(fromVal), str(toVal)]) - elif fromVal: - whereConditions.append(f'"{key}"::TEXT >= %s') - whereValues.append(str(fromVal)) - elif toVal: - whereConditions.append(f'"{key}"::TEXT <= %s') - whereValues.append(str(toVal)) - else: - whereConditions.append(f'"{key}"::TEXT ILIKE %s') - whereValues.append(str(v) if isinstance(v, str) else str(val)) + colType = fields.get(key, "TEXT") + _rbacAppendPaginationDictFilter( + key, val, colType, whereConditions, whereValues + ) else: whereConditions.append(f'"{key}"::TEXT ILIKE %s') whereValues.append(str(val)) diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py index 9634dd0d..511babde 100644 --- a/modules/routes/routeAdminFeatures.py +++ b/modules/routes/routeAdminFeatures.py @@ -475,6 +475,9 @@ def list_feature_instances( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") + from modules.routes.routeHelpers import enrichRowsWithFkLabels + from modules.datamodels.datamodelFeatures import FeatureInstance + enrichRowsWithFkLabels(items, FeatureInstance) return handleFilterValuesInMemory(items, column, pagination) if mode == "ids": diff --git a/modules/routes/routeAdminRbacRules.py b/modules/routes/routeAdminRbacRules.py index 5f3b5317..3eb45f1b 100644 --- a/modules/routes/routeAdminRbacRules.py +++ b/modules/routes/routeAdminRbacRules.py @@ -929,42 +929,17 @@ def list_roles( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - from modules.routes.routeHelpers import handleFilterValuesInMemory + from modules.routes.routeHelpers import handleFilterValuesInMemory, enrichRowsWithFkLabels + enrichRowsWithFkLabels(result, Role) return handleFilterValuesInMemory(result, column, pagination) if mode == "ids": from modules.routes.routeHelpers import handleIdsInMemory return handleIdsInMemory(result, pagination) - # Apply search, filtering and sorting if pagination requested if paginationParams: - # Apply search (if search term provided in filters) - searchTerm = paginationParams.filters.get("search", "").lower() if paginationParams.filters else "" - if searchTerm: - searchedResult = [] - for item in result: - roleLabel = (item.get("roleLabel") or "").lower() - descText = (item.get("description") or "").lower() - scopeType = (item.get("scopeType") or "").lower() - - if searchTerm in roleLabel or searchTerm in descText or searchTerm in scopeType: - searchedResult.append(item) - result = searchedResult - - # Apply filtering (if filters provided) - if paginationParams.filters: - # Use the interface's filter method - filteredResult = interface._applyFilters(result, paginationParams.filters) - else: - filteredResult = result - - # Apply sorting (in order of sortFields) - if paginationParams.sort: - sortedResult = interface._applySorting(filteredResult, paginationParams.sort) - else: - sortedResult = filteredResult - - # Apply pagination + from modules.routes.routeHelpers import applyFiltersAndSort + sortedResult = applyFiltersAndSort(result, paginationParams) totalItems = len(sortedResult) totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 startIdx = (paginationParams.page - 1) * paginationParams.pageSize diff --git a/modules/routes/routeAudit.py b/modules/routes/routeAudit.py index 0e686297..ed275a88 100644 --- a/modules/routes/routeAudit.py +++ b/modules/routes/routeAudit.py @@ -36,37 +36,47 @@ def _applySortFilterSearch( search: Optional[str] = None, searchableKeys: Optional[List[str]] = None, ) -> List[Dict[str, Any]]: - """Apply sort, filter and search to a list of dicts in-memory.""" + """Apply sort, filter and search to a list of dicts in-memory. + + Delegates to the shared ``applyFiltersAndSort`` from routeHelpers so that + date-range filters (``between`` operator) and null/empty filters work + consistently across all in-memory routes. + """ + from modules.routes.routeHelpers import applyFiltersAndSort + from modules.datamodels.datamodelPagination import PaginationParams, SortField + + filtersDict: Optional[Dict[str, Any]] = None if filtersJson: try: - filters = json.loads(filtersJson) if isinstance(filtersJson, str) else filtersJson - if isinstance(filters, dict): - for key, val in filters.items(): - if val is None or val == "": - continue - if isinstance(val, list): - items = [r for r in items if str(r.get(key, "")) in [str(v) for v in val]] - else: - items = [r for r in items if str(r.get(key, "")).lower() == str(val).lower()] + filtersDict = json.loads(filtersJson) if isinstance(filtersJson, str) else filtersJson except (json.JSONDecodeError, TypeError): pass if search and searchableKeys: - needle = search.lower() - items = [r for r in items if any(needle in str(r.get(k, "")).lower() for k in searchableKeys)] + if filtersDict is None: + filtersDict = {} + filtersDict["search"] = search + sortList = None if sortJson: try: - sortList = json.loads(sortJson) if isinstance(sortJson, str) else sortJson - if isinstance(sortList, list): - for sortDef in reversed(sortList): - field = sortDef.get("field", "") - desc = sortDef.get("direction", "asc") == "desc" - items.sort(key=lambda r, f=field: (r.get(f) is None, r.get(f, "")), reverse=desc) + raw = json.loads(sortJson) if isinstance(sortJson, str) else sortJson + if isinstance(raw, list): + sortList = raw except (json.JSONDecodeError, TypeError): pass - return items + if not filtersDict and not sortList: + return items + + sortFields = [SortField(**s) for s in sortList] if sortList else [] + params = PaginationParams.model_construct( + page=1, + pageSize=len(items) or 1, + filters=filtersDict or {}, + sort=sortFields, + ) + return applyFiltersAndSort(items, params) def _distinctColumnValues(items: List[Dict[str, Any]], column: str) -> List[Optional[str]]: diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index e3d26352..34ebc184 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -244,7 +244,7 @@ class TransactionResponse(BaseModel): aicoreProvider: Optional[str] aicoreModel: Optional[str] = None createdByUserId: Optional[str] = None - createdAt: Optional[datetime] + sysCreatedAt: Optional[datetime] = None mandateId: Optional[str] = None mandateName: Optional[str] = None @@ -311,7 +311,7 @@ class UserTransactionResponse(BaseModel): aicoreProvider: Optional[str] aicoreModel: Optional[str] = None createdByUserId: Optional[str] = None - createdAt: Optional[datetime] + sysCreatedAt: Optional[datetime] = None mandateId: Optional[str] = None mandateName: Optional[str] = None userId: Optional[str] = None @@ -515,7 +515,7 @@ def getTransactions( aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("sysCreatedAt"), + sysCreatedAt=t.get("sysCreatedAt"), mandateId=t.get("mandateId"), mandateName=t.get("mandateName") )) @@ -1073,13 +1073,9 @@ def handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: stripeSub = stripeToDict(stripe.Subscription.retrieve(stripeSubId, expand=["items"])) if stripeSub.get("current_period_start"): - stripeData["currentPeriodStart"] = datetime.fromtimestamp( - stripeSub["current_period_start"], tz=timezone.utc - ).isoformat() + stripeData["currentPeriodStart"] = float(stripeSub["current_period_start"]) if stripeSub.get("current_period_end"): - stripeData["currentPeriodEnd"] = datetime.fromtimestamp( - stripeSub["current_period_end"], tz=timezone.utc - ).isoformat() + stripeData["currentPeriodEnd"] = float(stripeSub["current_period_end"]) from modules.serviceCenter.services.serviceSubscription.stripeBootstrap import getStripePricesForPlan priceMapping = getStripePricesForPlan(planKey) @@ -1211,13 +1207,9 @@ def _handleSubscriptionWebhook(event) -> None: periodData: Dict[str, Any] = {} if obj.get("current_period_start"): - periodData["currentPeriodStart"] = datetime.fromtimestamp( - obj["current_period_start"], tz=timezone.utc - ).isoformat() + periodData["currentPeriodStart"] = float(obj["current_period_start"]) if obj.get("current_period_end"): - periodData["currentPeriodEnd"] = datetime.fromtimestamp( - obj["current_period_end"], tz=timezone.utc - ).isoformat() + periodData["currentPeriodEnd"] = float(obj["current_period_end"]) if periodData: subInterface.updateFields(subId, periodData) @@ -1462,7 +1454,7 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("sysCreatedAt") + sysCreatedAt=t.get("sysCreatedAt") ) result.append(row.model_dump()) @@ -1588,7 +1580,7 @@ def getMandateViewTransactions( aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("sysCreatedAt"), + sysCreatedAt=t.get("sysCreatedAt"), mandateId=t.get("mandateId"), mandateName=t.get("mandateName") )) @@ -1879,7 +1871,7 @@ def getUserViewTransactions( aicoreProvider=d.get("aicoreProvider"), aicoreModel=d.get("aicoreModel"), createdByUserId=d.get("createdByUserId"), - createdAt=d.get("sysCreatedAt") or d.get("createdAt"), + sysCreatedAt=d.get("sysCreatedAt"), mandateId=d.get("mandateId"), mandateName=d.get("mandateName"), userId=d.get("userId"), diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py index 05c8aa9d..dc5013bd 100644 --- a/modules/routes/routeDataConnections.py +++ b/modules/routes/routeDataConnections.py @@ -179,7 +179,9 @@ async def get_connections( if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") try: - return handleFilterValuesInMemory(_buildEnhancedItems(), column, pagination) + items = _buildEnhancedItems() + enrichRowsWithFkLabels(items, UserConnection) + return handleFilterValuesInMemory(items, column, pagination) except Exception as e: logger.error(f"Error getting filter values for connections: {str(e)}") raise HTTPException(status_code=500, detail=str(e)) diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index 11b90f09..b6d6f8e0 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -259,7 +259,6 @@ def get_files( ) from modules.routes.routeHelpers import ( - handleFilterValuesInMemory, handleIdsMode, parseCrossFilterPagination, ) @@ -275,16 +274,11 @@ def get_files( raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") crossPagination = parseCrossFilterPagination(column, pagination) recordFilter = {"sysCreatedBy": managementInterface.userId} - try: - from fastapi.responses import JSONResponse - values = managementInterface.db.getDistinctColumnValues( - FileItem, column, crossPagination, recordFilter - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - except Exception: - result = managementInterface.getAllFiles(pagination=None) - items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in result] - return handleFilterValuesInMemory(items, column, pagination) + from fastapi.responses import JSONResponse + values = managementInterface.db.getDistinctColumnValues( + FileItem, column, crossPagination, recordFilter + ) + return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) if mode == "ids": recordFilter = {"sysCreatedBy": managementInterface.userId} diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py index 7972181d..ef058ed9 100644 --- a/modules/routes/routeDataMandates.py +++ b/modules/routes/routeDataMandates.py @@ -140,15 +140,9 @@ def get_mandates( raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") if isPlatformAdmin: crossPagination = parseCrossFilterPagination(column, pagination) - try: - from fastapi.responses import JSONResponse - values = appInterface.db.getDistinctColumnValues(Mandate, column, crossPagination) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - except Exception: - result = appInterface.getAllMandates(pagination=None) - items = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else result) - items = [i.model_dump() if hasattr(i, 'model_dump') else i for i in items] - return handleFilterValuesInMemory(items, column, pagination) + from fastapi.responses import JSONResponse + values = appInterface.db.getDistinctColumnValues(Mandate, column, crossPagination) + return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) else: mandateItems = [] for mid in adminMandateIds: @@ -325,18 +319,19 @@ def create_mandate( plan = BUILTIN_PLANS.get(planKey) if plan: now = datetime.now(timezone.utc) + nowTs = now.timestamp() targetStatus = SubscriptionStatusEnum.TRIALING if plan.trialDays else SubscriptionStatusEnum.ACTIVE sub = MandateSubscription( mandateId=str(newMandate.id), planKey=planKey, status=targetStatus, recurring=plan.autoRenew and not plan.trialDays, - startedAt=now, - currentPeriodStart=now, + startedAt=nowTs, + currentPeriodStart=nowTs, ) if plan.trialDays: - sub.trialEndsAt = now + timedelta(days=plan.trialDays) - sub.currentPeriodEnd = now + timedelta(days=plan.trialDays) + sub.trialEndsAt = (now + timedelta(days=plan.trialDays)).timestamp() + sub.currentPeriodEnd = (now + timedelta(days=plan.trialDays)).timestamp() subInterface = _getSubRoot() subInterface.createSubscription(sub) logger.info(f"Created {targetStatus.value} subscription ({planKey}) for mandate {newMandate.id}") diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py index 67156291..6d72b763 100644 --- a/modules/routes/routeDataUsers.py +++ b/modules/routes/routeDataUsers.py @@ -100,14 +100,9 @@ def _getUserFilterOrIds(context, paginationJson, column=None, idsMode=False): if idsMode: return handleIdsMode(rootInterface.db, UserInDB, paginationJson) crossPagination = parseCrossFilterPagination(column, paginationJson) - try: - from fastapi.responses import JSONResponse - values = rootInterface.db.getDistinctColumnValues(UserInDB, column, crossPagination) - return JSONResponse(content=sorted(values, key=lambda v: v.lower())) - except Exception: - users = appInterface.getAllUsers() - items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users] - return handleFilterValuesInMemory(items, column, paginationJson, requestLang) + from fastapi.responses import JSONResponse + values = rootInterface.db.getDistinctColumnValues(UserInDB, column, crossPagination) + return JSONResponse(content=sorted(values, key=lambda v: v.lower())) rootInterface = getRootInterface() userMandates = rootInterface.getUserMandates(str(context.user.id)) diff --git a/modules/routes/routeHelpers.py b/modules/routes/routeHelpers.py index 1a396d26..37bfa3b2 100644 --- a/modules/routes/routeHelpers.py +++ b/modules/routes/routeHelpers.py @@ -111,27 +111,28 @@ def resolveRoleLabels(ids: List[str]) -> Dict[str, Optional[str]]: _BUILTIN_FK_RESOLVERS: Dict[str, Callable[[List[str]], Dict[str, str]]] = { "Mandate": resolveMandateLabels, "FeatureInstance": resolveInstanceLabels, - "User": resolveUserLabels, + "UserInDB": resolveUserLabels, "Role": resolveRoleLabels, } def _buildLabelResolversFromModel(modelClass: type) -> Dict[str, Callable[[List[str]], Dict[str, str]]]: """ - Auto-build labelResolvers dict from fk_model / fk_target annotations on a Pydantic model. - Maps field names to resolver functions for all fields that have a known FK target. - Unlike ``_get_fk_sort_meta`` this does NOT require ``fk_label_field`` — the - builtin resolvers already know which column to read. + Auto-build labelResolvers dict from ``json_schema_extra.fk_target`` on a Pydantic model. + Maps field names to resolver functions when the target table has a registered builtin + resolver and ``fk_target.labelField`` is set (non-None). """ resolvers: Dict[str, Callable[[List[str]], Dict[str, str]]] = {} for name, fieldInfo in modelClass.model_fields.items(): extra = fieldInfo.json_schema_extra if not extra or not isinstance(extra, dict): continue - fkModel = extra.get("fk_model") tgt = extra.get("fk_target") - if not fkModel and isinstance(tgt, dict): - fkModel = tgt.get("table") + if not isinstance(tgt, dict): + continue + if tgt.get("labelField") is None: + continue + fkModel = tgt.get("table") if fkModel and fkModel in _BUILTIN_FK_RESOLVERS: resolvers[name] = _BUILTIN_FK_RESOLVERS[fkModel] return resolvers @@ -147,7 +148,7 @@ def enrichRowsWithFkLabels( """Add ``{field}Label`` columns to each row for every FK field that has a registered resolver. - ``modelClass`` — if provided, resolvers are auto-built from ``fk_model`` + ``modelClass`` — if provided, resolvers are auto-built from ``fk_target`` annotations on the Pydantic model (via ``_buildLabelResolversFromModel``). ``labelResolvers`` — explicit resolver map that overrides auto-built ones. @@ -354,7 +355,14 @@ def applyFiltersAndSort( operator = "equals" value = filterValue - if value is None or value == "": + if value is None: + result = [ + item for item in result + if item.get(field) is None or item.get(field) == "" + ] + continue + + if value == "": continue result = [ @@ -455,6 +463,19 @@ def _matchesBetween(itemValue: Any, itemStr: str, value: Any) -> bool: if toTs is not None: return itemNum <= toTs except (ValueError, TypeError): + # Numeric range (e.g. FormGeneratorTable column filters on INTEGER/FLOAT) + try: + itemNum = float(itemValue) + fromNum = float(fromVal) if fromVal not in (None, "") else None + toNum = float(toVal) if toVal not in (None, "") else None + if fromNum is not None and toNum is not None: + return fromNum <= itemNum <= toNum + if fromNum is not None: + return itemNum >= fromNum + if toNum is not None: + return itemNum <= toNum + except (ValueError, TypeError): + pass fromStr = str(fromVal).lower() if fromVal else "" toStr = str(toVal).lower() if toVal else "" if fromStr and toStr: @@ -470,13 +491,42 @@ def _extractDistinctValues( items: List[Dict[str, Any]], columnKey: str, requestLang: Optional[str] = None, -) -> List[Optional[str]]: +) -> list: """Extract sorted distinct display values for a column from enriched items. + When the items contain a ``{columnKey}Label`` field (FK enrichment convention), + returns ``{value, label}`` objects so the frontend shows human-readable + labels in filter dropdowns. Otherwise returns plain strings. + Includes ``None`` as the last entry when at least one row has a null/empty value — this enables the "(Leer)" filter option in the frontend. """ _MISSING = object() + labelKey = f"{columnKey}Label" + hasFkLabels = any(labelKey in item for item in items[:20]) + + if hasFkLabels: + byVal: Dict[str, str] = {} + hasEmpty = False + for item in items: + val = item.get(columnKey, _MISSING) + if val is _MISSING: + continue + if val is None or val == "": + hasEmpty = True + continue + strVal = str(val) + if strVal not in byVal: + label = item.get(labelKey) + byVal[strVal] = str(label) if label else f"NA({strVal[:8]})" + result: list = sorted( + [{"value": v, "label": l} for v, l in byVal.items()], + key=lambda x: x["label"].lower(), + ) + if hasEmpty: + result.append(None) + return result + values = set() hasEmpty = False for item in items: @@ -496,7 +546,7 @@ def _extractDistinctValues( values.add(text) else: values.add(str(val)) - result: List[Optional[str]] = sorted(values, key=lambda v: v.lower()) + result = sorted(values, key=lambda v: v.lower()) if hasEmpty: result.append(None) return result diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index 8138775f..4f4f42c3 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -85,8 +85,8 @@ class InvitationResponse(BaseModel): roleIds: List[str] targetUsername: Optional[str] email: Optional[str] - createdBy: str - createdAt: float + sysCreatedBy: str + sysCreatedAt: float expiresAt: float usedBy: Optional[str] usedAt: Optional[float] @@ -227,8 +227,8 @@ def create_invitation( roleIds=data.roleIds, targetUsername=target_username_val, email=email_val, - createdBy=str(context.user.id), - createdAt=currentTime, + sysCreatedBy=str(context.user.id), + sysCreatedAt=currentTime, expiresAt=expiresAt, usedBy=None, usedAt=None, @@ -250,8 +250,8 @@ def create_invitation( roleIds=data.roleIds, targetUsername=target_username_val, email=email_val, - createdBy=str(context.user.id), - createdAt=currentTime, + sysCreatedBy=str(context.user.id), + sysCreatedAt=currentTime, expiresAt=expiresAt, usedBy=None, usedAt=None, @@ -268,7 +268,6 @@ def create_invitation( roleIds=data.roleIds, targetUsername=target_username_val, email=email_val, - createdBy=str(context.user.id), expiresAt=expiresAt, maxUses=data.maxUses ) @@ -368,8 +367,6 @@ def create_invitation( f"to {target_desc}, expires in {data.expiresInHours}h" ) - # Invitation extends PowerOnModel: recordCreate/_saveRecord set sysCreatedAt and sysCreatedBy automatically. - # API response uses createdAt/createdBy; map from the system fields (no separate createdAt column on model). return InvitationResponse( id=str(createdRecord.get("id")), token=str(createdRecord.get("token")), @@ -378,8 +375,8 @@ def create_invitation( roleIds=createdRecord.get("roleIds", []), targetUsername=createdRecord.get("targetUsername"), email=createdRecord.get("email"), - createdBy=str(createdRecord["sysCreatedBy"]), - createdAt=float(createdRecord["sysCreatedAt"]), + sysCreatedBy=str(createdRecord["sysCreatedBy"]), + sysCreatedAt=float(createdRecord["sysCreatedAt"]), expiresAt=createdRecord.get("expiresAt"), usedBy=createdRecord.get("usedBy"), usedAt=createdRecord.get("usedAt"), @@ -470,7 +467,9 @@ def list_invitations( if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") try: - return handleFilterValuesInMemory(_buildInvitationItems(), column, pagination) + items = _buildInvitationItems() + enrichRowsWithFkLabels(items, Invitation) + return handleFilterValuesInMemory(items, column, pagination) except Exception as e: logger.error(f"Error getting filter values for invitations: {e}") raise HTTPException(status_code=500, detail=str(e)) diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 3419038c..a433c1ed 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -106,11 +106,11 @@ def _autoActivatePending(subInterface, pendingSub: Dict[str, Any]) -> None: now = datetime.now(timezone.utc) targetStatus = SubscriptionStatusEnum.TRIALING if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE - additionalData = {"currentPeriodStart": now.isoformat()} + additionalData = {"currentPeriodStart": now.timestamp()} if plan and plan.trialDays: trialEnd = now + timedelta(days=plan.trialDays) - additionalData["trialEndsAt"] = trialEnd.isoformat() - additionalData["currentPeriodEnd"] = trialEnd.isoformat() + additionalData["trialEndsAt"] = trialEnd.timestamp() + additionalData["currentPeriodEnd"] = trialEnd.timestamp() try: subInterface.transitionStatus( diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 9c8a7ed7..22beef42 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -486,7 +486,11 @@ def getAllSubscriptions( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - return handleFilterValuesInMemory(_buildEnrichedSubscriptions(), column, pagination) + from modules.routes.routeHelpers import enrichRowsWithFkLabels + from modules.datamodels.datamodelSubscription import MandateSubscription + items = _buildEnrichedSubscriptions() + enrichRowsWithFkLabels(items, MandateSubscription) + return handleFilterValuesInMemory(items, column, pagination) if mode == "ids": return handleIdsInMemory(_buildEnrichedSubscriptions(), pagination) diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py index bf05f8c0..573df000 100644 --- a/modules/routes/routeSystem.py +++ b/modules/routes/routeSystem.py @@ -581,12 +581,9 @@ def _buildIntegrationsOverviewPayload(userId: str, user=None) -> Dict[str, Any]: # --- Extractors (registered extensions, unique + per-class rows) --- try: - from modules.serviceCenter.services.serviceExtraction.mainServiceExtraction import ExtractionService - from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry + from modules.serviceCenter.services.serviceExtraction.subRegistry import getExtractorRegistry - if ExtractionService._sharedExtractorRegistry is None: - ExtractionService._sharedExtractorRegistry = ExtractorRegistry() - reg = ExtractionService._sharedExtractorRegistry + reg = getExtractorRegistry() ext_map = reg.getExtensionToMimeMap() uniq = sorted({str(k).upper() for k in ext_map.keys() if k and "." not in str(k)}) out["extractorExtensions"] = uniq diff --git a/modules/serviceCenter/services/serviceBackgroundJobs/mainBackgroundJobService.py b/modules/serviceCenter/services/serviceBackgroundJobs/mainBackgroundJobService.py index 37830fd1..b8a55e28 100644 --- a/modules/serviceCenter/services/serviceBackgroundJobs/mainBackgroundJobService.py +++ b/modules/serviceCenter/services/serviceBackgroundJobs/mainBackgroundJobService.py @@ -132,7 +132,7 @@ def _updateJob(jobId: str, fields: Dict[str, Any]) -> None: def _markStarted(jobId: str) -> None: _updateJob(jobId, { "status": BackgroundJobStatusEnum.RUNNING.value, - "startedAt": datetime.now(timezone.utc), + "startedAt": datetime.now(timezone.utc).timestamp(), }) @@ -141,7 +141,7 @@ def _markSuccess(jobId: str, result: Optional[Dict[str, Any]]) -> None: "status": BackgroundJobStatusEnum.SUCCESS.value, "result": result or {}, "progress": 100, - "finishedAt": datetime.now(timezone.utc), + "finishedAt": datetime.now(timezone.utc).timestamp(), }) @@ -150,7 +150,7 @@ def _markError(jobId: str, errorMessage: str) -> None: _updateJob(jobId, { "status": BackgroundJobStatusEnum.ERROR.value, "errorMessage": truncated, - "finishedAt": datetime.now(timezone.utc), + "finishedAt": datetime.now(timezone.utc).timestamp(), }) @@ -211,7 +211,7 @@ def listJobs( out = [r for r in out if r.get("featureInstanceId") == featureInstanceId] if jobType is not None: out = [r for r in out if r.get("jobType") == jobType] - out.sort(key=lambda r: r.get("createdAt") or "", reverse=True) + out.sort(key=lambda r: r.get("createdAt") or 0, reverse=True) return out[:limit] diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py index 6c47b725..1a902945 100644 --- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py +++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -142,6 +142,7 @@ class SubscriptionService: self._cleanupPreparatorySubscriptions(mid) now = datetime.now(timezone.utc) + nowTs = now.timestamp() if plan.trialDays: initialStatus = SubscriptionStatusEnum.TRIALING elif isPaid: @@ -154,19 +155,19 @@ class SubscriptionService: planKey=planKey, status=initialStatus, recurring=plan.autoRenew and not plan.trialDays, - startedAt=now, - currentPeriodStart=now, + startedAt=nowTs, + currentPeriodStart=nowTs, snapshotPricePerUserCHF=plan.pricePerUserCHF, snapshotPricePerInstanceCHF=plan.pricePerFeatureInstanceCHF, ) if plan.trialDays: - sub.trialEndsAt = now + timedelta(days=plan.trialDays) + sub.trialEndsAt = (now + timedelta(days=plan.trialDays)).timestamp() if plan.billingPeriod == BillingPeriodEnum.MONTHLY: - sub.currentPeriodEnd = now + timedelta(days=30) + sub.currentPeriodEnd = (now + timedelta(days=30)).timestamp() elif plan.billingPeriod == BillingPeriodEnum.YEARLY: - sub.currentPeriodEnd = now + timedelta(days=365) + sub.currentPeriodEnd = (now + timedelta(days=365)).timestamp() created = self._interface.createSubscription(sub) @@ -310,11 +311,8 @@ class SubscriptionService: ) if currentOperative and currentOperative.get("currentPeriodEnd") and not isTrialPredecessor: periodEnd = currentOperative["currentPeriodEnd"] - if isinstance(periodEnd, str): - periodEnd = datetime.fromisoformat(periodEnd) - trialEndTs = int(periodEnd.timestamp()) - subscriptionData["trial_end"] = trialEndTs - self._interface.updateFields(subRecord["id"], {"effectiveFrom": periodEnd.isoformat()}) + subscriptionData["trial_end"] = int(periodEnd) + self._interface.updateFields(subRecord["id"], {"effectiveFrom": periodEnd}) session = None for attempt in range(2): @@ -509,9 +507,7 @@ class SubscriptionService: periodEnd = sub.get("currentPeriodEnd") if periodEnd: - if isinstance(periodEnd, str): - periodEnd = datetime.fromisoformat(periodEnd) - if periodEnd <= datetime.now(timezone.utc): + if periodEnd <= datetime.now(timezone.utc).timestamp(): raise ValueError("Cannot reactivate — period has already ended") stripeSubId = sub.get("stripeSubscriptionId") diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py index ce63a43d..9e99ba12 100644 --- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py +++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py @@ -18,6 +18,7 @@ StripePlanPrice is updated. Other stale active Prices on the same Product """ import logging +from concurrent.futures import ThreadPoolExecutor, as_completed from typing import Dict, Optional from modules.connectors.connectorDbPostgre import DatabaseConnector @@ -242,8 +243,142 @@ def _validateStripeIdsExist(stripe, mapping: StripePlanPrice) -> bool: return False +def _processOnePlan( + stripe, + planKey: str, + plan: SubscriptionPlan, + existingMapping: Optional[StripePlanPrice], +) -> None: + """Reconcile or provision Stripe Products/Prices for a single plan. + + Each call uses its own DB connection so it is safe to run in a thread pool. + """ + stripePeriod = _PERIOD_TO_STRIPE.get(plan.billingPeriod) + if not stripePeriod: + return + + interval = stripePeriod["interval"] + intervalCount = int(stripePeriod.get("interval_count") or 1) + db = _getBillingDb() + + if existingMapping: + mapping = existingMapping + hasAllPrices = mapping.stripePriceIdUsers and mapping.stripePriceIdInstances + hasAllProducts = mapping.stripeProductIdUsers and mapping.stripeProductIdInstances + if hasAllPrices and hasAllProducts: + if _validateStripeIdsExist(stripe, mapping): + changed = False + reconciledUsers = _reconcilePrice( + stripe, mapping.stripeProductIdUsers, mapping.stripePriceIdUsers, + plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", + intervalCount, + ) + if reconciledUsers != mapping.stripePriceIdUsers: + changed = True + + reconciledInstances = _reconcilePrice( + stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances, + plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Modul", + intervalCount, + ) + if reconciledInstances != mapping.stripePriceIdInstances: + changed = True + + _archiveOtherRecurringPrices( + stripe, mapping.stripeProductIdUsers, reconciledUsers, interval, intervalCount, + ) + _archiveOtherRecurringPrices( + stripe, mapping.stripeProductIdInstances, reconciledInstances, interval, intervalCount, + ) + + if changed: + db.recordModify(StripePlanPrice, mapping.id, { + "stripePriceIdUsers": reconciledUsers, + "stripePriceIdInstances": reconciledInstances, + }) + logger.info( + "Reconciled Stripe prices for plan %s to catalog (CHF): users=%s, instances=%s", + planKey, reconciledUsers, reconciledInstances, + ) + else: + logger.debug("Stripe prices up-to-date for plan %s", planKey) + return + else: + logger.warning( + "Stored Stripe IDs for plan %s reference unknown objects " + "(likely wrong Stripe account or copied DB) — re-provisioning.", + planKey, + ) + + productIdUsers = None + productIdInstances = None + priceIdUsers = None + priceIdInstances = None + + if plan.pricePerUserCHF > 0: + productIdUsers = _findStripeProduct(stripe, planKey, "users") + if not productIdUsers: + productIdUsers = _createStripeProduct( + stripe, "Benutzer-Lizenzen", f"Benutzer-Lizenzen für {plan.title or planKey}", + planKey, "users", + ) + userCents = int(round(plan.pricePerUserCHF * 100)) + priceIdUsers = _findExistingStripePrice( + stripe, productIdUsers, userCents, interval, intervalCount, + ) + if not priceIdUsers: + priceIdUsers = _createStripePrice( + stripe, productIdUsers, plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", + intervalCount, + ) + _archiveOtherRecurringPrices(stripe, productIdUsers, priceIdUsers, interval, intervalCount) + + if plan.pricePerFeatureInstanceCHF > 0: + productIdInstances = _findStripeProduct(stripe, planKey, "instances") + if not productIdInstances: + productIdInstances = _createStripeProduct( + stripe, "Module", f"Module für {plan.title or planKey}", + planKey, "instances", + ) + instCents = int(round(plan.pricePerFeatureInstanceCHF * 100)) + priceIdInstances = _findExistingStripePrice( + stripe, productIdInstances, instCents, interval, intervalCount, + ) + if not priceIdInstances: + priceIdInstances = _createStripePrice( + stripe, productIdInstances, plan.pricePerFeatureInstanceCHF, interval, + f"{planKey} — Modul", + intervalCount, + ) + _archiveOtherRecurringPrices( + stripe, productIdInstances, priceIdInstances, interval, intervalCount, + ) + + persistData = { + "stripeProductId": "", + "stripeProductIdUsers": productIdUsers, + "stripeProductIdInstances": productIdInstances, + "stripePriceIdUsers": priceIdUsers, + "stripePriceIdInstances": priceIdInstances, + } + + if existingMapping: + db.recordModify(StripePlanPrice, existingMapping.id, persistData) + else: + db.recordCreate(StripePlanPrice, StripePlanPrice(planKey=planKey, **persistData).model_dump()) + + logger.info( + "Stripe bootstrapped for %s: users=%s/%s, instances=%s/%s", + planKey, productIdUsers, priceIdUsers, productIdInstances, priceIdInstances, + ) + + def bootstrapStripePrices() -> None: - """Ensure all paid plans have separate Stripe Products for users and instances.""" + """Ensure all paid plans have separate Stripe Products for users and instances. + + Plans are processed in parallel (one thread per plan) to reduce boot time. + Each thread uses its own DB connection; Stripe SDK is thread-safe. + """ try: from modules.shared.stripeClient import getStripeClient stripe = getStripeClient() @@ -251,132 +386,29 @@ def bootstrapStripePrices() -> None: logger.error("Stripe not configured — cannot bootstrap subscription prices: %s", e) return - db = _getBillingDb() - existing = _loadExistingMappings(db) + existing = _loadExistingMappings(_getBillingDb()) - for planKey, plan in BUILTIN_PLANS.items(): - if plan.billingPeriod == BillingPeriodEnum.NONE: - continue - if plan.pricePerUserCHF == 0 and plan.pricePerFeatureInstanceCHF == 0: - continue + plans = [ + (planKey, plan) + for planKey, plan in BUILTIN_PLANS.items() + if plan.billingPeriod != BillingPeriodEnum.NONE + and (plan.pricePerUserCHF > 0 or plan.pricePerFeatureInstanceCHF > 0) + ] - stripePeriod = _PERIOD_TO_STRIPE.get(plan.billingPeriod) - if not stripePeriod: - continue + if not plans: + return - interval = stripePeriod["interval"] - intervalCount = int(stripePeriod.get("interval_count") or 1) - - if planKey in existing: - mapping = existing[planKey] - hasAllPrices = mapping.stripePriceIdUsers and mapping.stripePriceIdInstances - hasAllProducts = mapping.stripeProductIdUsers and mapping.stripeProductIdInstances - if hasAllPrices and hasAllProducts: - if _validateStripeIdsExist(stripe, mapping): - changed = False - reconciledUsers = _reconcilePrice( - stripe, mapping.stripeProductIdUsers, mapping.stripePriceIdUsers, - plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", - intervalCount, - ) - if reconciledUsers != mapping.stripePriceIdUsers: - changed = True - - reconciledInstances = _reconcilePrice( - stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances, - plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Modul", - intervalCount, - ) - if reconciledInstances != mapping.stripePriceIdInstances: - changed = True - - _archiveOtherRecurringPrices( - stripe, mapping.stripeProductIdUsers, reconciledUsers, interval, intervalCount, - ) - _archiveOtherRecurringPrices( - stripe, mapping.stripeProductIdInstances, reconciledInstances, interval, intervalCount, - ) - - if changed: - db.recordModify(StripePlanPrice, mapping.id, { - "stripePriceIdUsers": reconciledUsers, - "stripePriceIdInstances": reconciledInstances, - }) - logger.info( - "Reconciled Stripe prices for plan %s to catalog (CHF): users=%s, instances=%s", - planKey, reconciledUsers, reconciledInstances, - ) - else: - logger.debug("Stripe prices up-to-date for plan %s", planKey) - continue - else: - logger.warning( - "Stored Stripe IDs for plan %s reference unknown objects " - "(likely wrong Stripe account or copied DB) — re-provisioning.", - planKey, - ) - - productIdUsers = None - productIdInstances = None - priceIdUsers = None - priceIdInstances = None - - if plan.pricePerUserCHF > 0: - productIdUsers = _findStripeProduct(stripe, planKey, "users") - if not productIdUsers: - productIdUsers = _createStripeProduct( - stripe, "Benutzer-Lizenzen", f"Benutzer-Lizenzen für {plan.title or planKey}", - planKey, "users", - ) - userCents = int(round(plan.pricePerUserCHF * 100)) - priceIdUsers = _findExistingStripePrice( - stripe, productIdUsers, userCents, interval, intervalCount, - ) - if not priceIdUsers: - priceIdUsers = _createStripePrice( - stripe, productIdUsers, plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", - intervalCount, - ) - _archiveOtherRecurringPrices(stripe, productIdUsers, priceIdUsers, interval, intervalCount) - - if plan.pricePerFeatureInstanceCHF > 0: - productIdInstances = _findStripeProduct(stripe, planKey, "instances") - if not productIdInstances: - productIdInstances = _createStripeProduct( - stripe, "Module", f"Module für {plan.title or planKey}", - planKey, "instances", - ) - instCents = int(round(plan.pricePerFeatureInstanceCHF * 100)) - priceIdInstances = _findExistingStripePrice( - stripe, productIdInstances, instCents, interval, intervalCount, - ) - if not priceIdInstances: - priceIdInstances = _createStripePrice( - stripe, productIdInstances, plan.pricePerFeatureInstanceCHF, interval, - f"{planKey} — Modul", - intervalCount, - ) - _archiveOtherRecurringPrices( - stripe, productIdInstances, priceIdInstances, interval, intervalCount, - ) - - persistData = { - "stripeProductId": "", - "stripeProductIdUsers": productIdUsers, - "stripeProductIdInstances": productIdInstances, - "stripePriceIdUsers": priceIdUsers, - "stripePriceIdInstances": priceIdInstances, + with ThreadPoolExecutor(max_workers=len(plans)) as executor: + futures = { + executor.submit(_processOnePlan, stripe, planKey, plan, existing.get(planKey)): planKey + for planKey, plan in plans } - - if planKey in existing: - db.recordModify(StripePlanPrice, existing[planKey].id, persistData) - else: - db.recordCreate(StripePlanPrice, StripePlanPrice(planKey=planKey, **persistData).model_dump()) - - logger.info( - "Stripe bootstrapped for %s: users=%s/%s, instances=%s/%s", - planKey, productIdUsers, priceIdUsers, productIdInstances, priceIdInstances, - ) + for future in as_completed(futures): + planKey = futures[future] + try: + future.result() + except Exception as e: + logger.error("Stripe bootstrap failed for plan %s: %s", planKey, e) def getStripePricesForPlan(planKey: str) -> Optional[StripePlanPrice]: diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py index f7e432bc..b949a1b4 100644 --- a/modules/shared/attributeUtils.py +++ b/modules/shared/attributeUtils.py @@ -291,15 +291,11 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag } mergedExtra = _mergedFieldJsonExtra(field) - fkModelName = mergedExtra.get("fk_model") fkTarget = mergedExtra.get("fk_target") - if not fkModelName and isinstance(fkTarget, dict) and fkTarget.get("table"): - fkModelName = fkTarget.get("table") - hasFk = bool(fkModelName) or (isinstance(fkTarget, dict) and bool(fkTarget.get("table"))) - if hasFk: - attr_def["displayField"] = f"{name}Label" - if fkModelName: - attr_def["fkModel"] = fkModelName + if isinstance(fkTarget, dict) and fkTarget.get("table"): + attr_def["fkModel"] = fkTarget["table"] + if fkTarget.get("labelField"): + attr_def["displayField"] = f"{name}Label" # Render hints (Excel-like format string + i18n-resolved label tokens). # Labels are resolved server-side via resolveText() so the FE renders them @@ -318,6 +314,37 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag return {"model": model_label, "attributes": attributes} +def _loadFeatureDatamodelClasses(modelClasses: Dict[str, Type[BaseModel]]) -> None: + """Register Pydantic models from ``modules.features.*`` ``datamodel*.py`` files.""" + features_dir = os.path.join( + os.path.dirname(os.path.dirname(__file__)), "features" + ) + if not os.path.isdir(features_dir): + return + for root, _dirs, files in os.walk(features_dir): + for fileName in files: + if not fileName.startswith("datamodel") or not fileName.endswith(".py"): + continue + fullPath = os.path.join(root, fileName) + relPath = os.path.relpath(fullPath, features_dir) + moduleRel = os.path.splitext(relPath)[0].replace("\\", ".").replace("/", ".") + module_name = f"modules.features.{moduleRel}" + try: + module = importlib.import_module(module_name) + for name, obj in inspect.getmembers(module): + if ( + inspect.isclass(obj) + and issubclass(obj, BaseModel) + and obj != BaseModel + ): + modelClasses[name] = obj + except Exception as e: + logger.warning( + f"Error importing feature datamodel module {module_name}: {str(e)}", + exc_info=True, + ) + + def getModelClasses() -> Dict[str, Type[BaseModel]]: """ Dynamically get all model classes from all model modules. @@ -375,6 +402,8 @@ def getModelClasses() -> Dict[str, Type[BaseModel]]: logger.warning(f"Error importing module {module_name}: {str(e)}", exc_info=True) # Continue with other modules even if one fails + _loadFeatureDatamodelClasses(modelClasses) + return modelClasses diff --git a/modules/shared/fkRegistry.py b/modules/shared/fkRegistry.py index 9772b8a6..bf869bf4 100644 --- a/modules/shared/fkRegistry.py +++ b/modules/shared/fkRegistry.py @@ -241,3 +241,32 @@ def _invalidateFkCache() -> None: with _lock: _cachedRelationships = None _cachedTableToDb = None + + +_FK_TARGET_REQUIRED_KEYS = {"db", "table", "labelField"} + + +def validateFkTargets() -> List[str]: + """Validate every ``fk_target`` dict across all registered PowerOnModel subclasses. + + Returns a list of error strings (empty = all good). + Each ``fk_target`` must contain exactly ``db``, ``table``, and ``labelField`` + (``labelField`` may be ``None``). + """ + _ensureModelsLoaded() + errors: List[str] = [] + for tableName, modelCls in MODEL_REGISTRY.items(): + for fieldName, fieldInfo in modelCls.model_fields.items(): + extra = fieldInfo.json_schema_extra + if not isinstance(extra, dict): + continue + fkTarget = extra.get("fk_target") + if fkTarget is None: + continue + if not isinstance(fkTarget, dict): + errors.append(f"{tableName}.{fieldName}: fk_target is not a dict ({type(fkTarget).__name__})") + continue + missing = _FK_TARGET_REQUIRED_KEYS - fkTarget.keys() + if missing: + errors.append(f"{tableName}.{fieldName}: fk_target missing keys {sorted(missing)}") + return errors diff --git a/modules/workflows/methods/methodTrustee/actions/processDocuments.py b/modules/workflows/methods/methodTrustee/actions/processDocuments.py index 11e9aba1..b05e25f4 100644 --- a/modules/workflows/methods/methodTrustee/actions/processDocuments.py +++ b/modules/workflows/methods/methodTrustee/actions/processDocuments.py @@ -15,7 +15,7 @@ syncToAccounting (via DataRef on documents[0]). import json import logging -from datetime import datetime +from datetime import datetime, timezone from typing import Dict, Any, List, Optional from modules.datamodels.datamodelChat import ActionResult, ActionDocument @@ -79,6 +79,31 @@ def _parseIsoDate(value: Any) -> Optional[datetime]: return None +def _toTimestamp(value: Any) -> Optional[float]: + """Convert ISO date string or numeric value to UTC midnight unix timestamp.""" + if value is None or value == "": + return None + if isinstance(value, (int, float)): + return float(value) + raw = _cleanStr(value) + if not raw: + return None + try: + return datetime.strptime(raw[:10], "%Y-%m-%d").replace(tzinfo=timezone.utc).timestamp() + except ValueError: + return None + + +def _timestampToDatetime(value: Any) -> Optional[datetime]: + """Convert UTC unix timestamp (float) to datetime for proximity scoring.""" + if value is None: + return None + try: + return datetime.fromtimestamp(float(value), tz=timezone.utc) + except (ValueError, TypeError, OSError): + return None + + def _normaliseAmount(value: Any) -> float: """Use absolute rounded amount, since bank lines are often signed.""" return round(abs(_parseFloat(value)), 2) @@ -103,7 +128,7 @@ def _findBestBankMatch( bankRef = _normaliseRef(bankPosition.get("paymentReference") or bankPosition.get("bookingReference")) bankAmount = _normaliseAmount(bankPosition.get("bookingAmount")) bankIban = _normaliseRef(bankPosition.get("payeeIban")) - bankDate = _parseIsoDate(bankPosition.get("valuta")) + bankDate = _timestampToDatetime(bankPosition.get("valuta")) bankCompany = _normaliseCompany(bankPosition.get("company")) bestScore = 0 @@ -122,7 +147,7 @@ def _findBestBankMatch( candidateRef = _normaliseRef(candidate.get("paymentReference") or candidate.get("bookingReference")) candidateAmount = _normaliseAmount(candidate.get("bookingAmount")) candidateIban = _normaliseRef(candidate.get("payeeIban")) - candidateDate = _parseIsoDate(candidate.get("valuta")) + candidateDate = _timestampToDatetime(candidate.get("valuta")) candidateCompany = _normaliseCompany(candidate.get("company")) # Strongest signal: structured payment reference / invoice reference match. @@ -183,7 +208,7 @@ def _recordToPosition(record: Dict[str, Any], documentId: Optional[str], feature return { "documentId": documentId, "documentType": recDocType, - "valuta": record.get("valuta"), + "valuta": _toTimestamp(record.get("valuta")), "transactionDateTime": record.get("transactionDateTime"), "company": record.get("company", ""), "desc": record.get("desc", ""), @@ -203,7 +228,7 @@ def _recordToPosition(record: Dict[str, Any], documentId: Optional[str], feature "payeeName": _cleanStr(record.get("payeeName")), "payeeBic": _cleanStr(record.get("payeeBic")), "paymentReference": _cleanStr(record.get("paymentReference")), - "dueDate": _cleanStr(record.get("dueDate")), + "dueDate": _toTimestamp(record.get("dueDate")), "featureInstanceId": featureInstanceId, "mandateId": mandateId, } diff --git a/modules/workflows/methods/methodTrustee/actions/queryData.py b/modules/workflows/methods/methodTrustee/actions/queryData.py index 36cbbe89..9b2e3e10 100644 --- a/modules/workflows/methods/methodTrustee/actions/queryData.py +++ b/modules/workflows/methods/methodTrustee/actions/queryData.py @@ -20,6 +20,7 @@ This action does NOT trigger an external sync — use import json import logging import re +from datetime import datetime as _dt, timezone as _tz from typing import Any, Dict, List, Optional from modules.datamodels.datamodelChat import ActionResult @@ -27,6 +28,26 @@ from modules.datamodels.datamodelChat import ActionResult logger = logging.getLogger(__name__) +def _isoToTs(isoDate: Optional[str]) -> Optional[float]: + """``YYYY-MM-DD`` → UTC midnight unix timestamp (or None).""" + if not isoDate: + return None + try: + return _dt.strptime(isoDate.strip()[:10], "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + except (ValueError, AttributeError): + return None + + +def _tsToIso(ts) -> Optional[str]: + """Unix timestamp → ``YYYY-MM-DD`` (or None).""" + if ts is None: + return None + try: + return _dt.fromtimestamp(float(ts), tz=_tz.utc).strftime("%Y-%m-%d") + except (ValueError, TypeError, OSError): + return None + + _NAME_NORMALIZE_RE = re.compile(r"[^a-z0-9]+") _ENTITY_TO_MODEL = { "contact": "TrusteeDataContact", @@ -224,7 +245,9 @@ def _deriveRentForContact( if not entries or not lines: return [], None - fromDate, toDate = _parsePeriod(period) + fromDateStr, toDateStr = _parsePeriod(period) + fromTs = _isoToTs(fromDateStr) + toTs = _isoToTs(toDateStr) accountMatcher = _accountMatcher(accountPattern) nameKey = _normalizeText(contact.get("name") or "") contactNumber = (contact.get("contactNumber") or "").strip() @@ -236,10 +259,10 @@ def _deriveRentForContact( eid = e.get("id") if not eid: continue - bDate = e.get("bookingDate") or "" - if fromDate and bDate and bDate < fromDate: + bDate = e.get("bookingDate") + if fromTs is not None and bDate is not None and float(bDate) < fromTs: continue - if toDate and bDate and bDate > toDate: + if toTs is not None and bDate is not None and float(bDate) > toTs + 86399: continue descKey = _normalizeText(" ".join([e.get("description") or "", e.get("reference") or ""])) if (nameKey and nameKey in descKey) or (contactNumber and contactNumber in (e.get("reference") or "")): @@ -260,7 +283,7 @@ def _deriveRentForContact( amount = credit - debit e = entryById.get(ln.get("journalEntryId"), {}) rentLines.append({ - "date": e.get("bookingDate"), + "date": _tsToIso(e.get("bookingDate")), "ref": e.get("reference"), "account": accountNo, "amount": round(amount, 2), diff --git a/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py b/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py index 0082336a..6ff5641c 100644 --- a/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py +++ b/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py @@ -8,12 +8,33 @@ Checks lastSyncAt to avoid redundant imports unless forceRefresh is set. import json import logging import time -from typing import Dict, Any +from datetime import datetime as _dt, timezone as _tz +from typing import Dict, Any, Optional from modules.datamodels.datamodelChat import ActionResult logger = logging.getLogger(__name__) + +def _isoToTs(isoDate: Optional[str]) -> Optional[float]: + """``YYYY-MM-DD`` → UTC midnight unix timestamp (or None).""" + if not isoDate: + return None + try: + return _dt.strptime(isoDate.strip()[:10], "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + except (ValueError, AttributeError): + return None + + +def _tsToIso(ts) -> Optional[str]: + """Unix timestamp → ``YYYY-MM-DD`` (or None).""" + if ts is None: + return None + try: + return _dt.fromtimestamp(float(ts), tz=_tz.utc).strftime("%Y-%m-%d") + except (ValueError, TypeError, OSError): + return None + _SYNC_THRESHOLD_SECONDS = 3600 @@ -147,16 +168,18 @@ def _exportAccountingData(trusteeInterface, featureInstanceId: str, dateFrom: st }) entries = trusteeInterface.db.getRecordset(TrusteeDataJournalEntry, recordFilter=baseFilter) or [] + fromTs = _isoToTs(dateFrom) + toTs = _isoToTs(dateTo) entryMap = {} for e in entries: eid = e.get("id", "") - bDate = e.get("bookingDate", "") - if dateFrom and bDate and bDate < dateFrom: + bDate = e.get("bookingDate") + if fromTs is not None and bDate is not None and float(bDate) < fromTs: continue - if dateTo and bDate and bDate > dateTo: + if toTs is not None and bDate is not None and float(bDate) > toTs + 86399: continue entryMap[eid] = { - "date": bDate, + "date": _tsToIso(bDate), "ref": e.get("reference", ""), "desc": e.get("description", ""), "amount": e.get("totalAmount", 0), diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py index 49e19705..b31568a2 100644 --- a/modules/workflows/processing/modes/modeDynamic.py +++ b/modules/workflows/processing/modes/modeDynamic.py @@ -744,8 +744,8 @@ class DynamicMode(BaseMode): name=name if name != 'Unknown' else 'Unknown Document', mimeType=mimeType if mimeType and mimeType != 'Unknown' else None, size=str(size) if size and size != 'Unknown' else None, - created=str(created) if created and created != 'Unknown' else None, - modified=str(modified) if modified and modified != 'Unknown' else None, + created=float(created) if created is not None and created != 'Unknown' else None, + modified=float(modified) if modified is not None and modified != 'Unknown' else None, typeGroup=str(typeGroup) if typeGroup and typeGroup != 'Unknown' else None, documentId=str(documentId) if documentId and documentId != 'Unknown' else None, reference=str(reference) if reference and reference != 'Unknown' else None, diff --git a/tests/unit/features/trustee/test_accountingDataSync_balances.py b/tests/unit/features/trustee/test_accountingDataSync_balances.py index 517318c9..711c9808 100644 --- a/tests/unit/features/trustee/test_accountingDataSync_balances.py +++ b/tests/unit/features/trustee/test_accountingDataSync_balances.py @@ -9,11 +9,17 @@ These tests exercise pure-logic paths -- no DB, no HTTP. We pass a would have been written to ``TrusteeDataAccountBalance``. """ +from datetime import datetime, timezone from typing import Any, Dict, List, Type from unittest.mock import MagicMock import pytest + +def _ts(isoDate: str) -> float: + """Convert ``YYYY-MM-DD`` to UTC midnight unix timestamp for test fixtures.""" + return datetime.strptime(isoDate, "%Y-%m-%d").replace(tzinfo=timezone.utc).timestamp() + from modules.features.trustee.accounting.accountingConnectorBase import AccountingPeriodBalance from modules.features.trustee.accounting.accountingDataSync import ( AccountingDataSync, @@ -124,6 +130,45 @@ class TestPersistBalancesConnectorPath: assert row["mandateId"] == "m-1" + def test_connectorBalancesEnrichedWithJournalMovements(self): + """When connector provides closingBalance but no debit/credit (e.g. RMA /gl/saldo), + the sync should enrich from journal lines.""" + entries = [ + {"id": "e1", "bookingDate": _ts("2025-06-15")}, + {"id": "e2", "bookingDate": _ts("2025-06-20")}, + ] + lines = [ + {"journalEntryId": "e1", "accountNumber": "1020", "debitAmount": 500.0, "creditAmount": 0.0}, + {"journalEntryId": "e2", "accountNumber": "1020", "debitAmount": 0.0, "creditAmount": 200.0}, + ] + db = _FakeDb(entries, lines) + sync = AccountingDataSync(_FakeInterface(db)) + + connectorRows = [ + AccountingPeriodBalance( + accountNumber="1020", periodYear=2025, periodMonth=6, + openingBalance=10000.0, closingBalance=10300.0, currency="CHF", + ), + AccountingPeriodBalance( + accountNumber="1020", periodYear=2025, periodMonth=0, + openingBalance=10000.0, closingBalance=10300.0, currency="CHF", + ), + ] + + sync._persistBalances( + "fi-1", "m-1", + _FakeJournalEntry, _FakeJournalLine, _FakeBalance, + connectorRows, "connector", + ) + + byPeriod = {(r["accountNumber"], r["periodMonth"]): r for r in db.createdRows} + assert byPeriod[("1020", 6)]["closingBalance"] == 10300.0 + assert byPeriod[("1020", 6)]["debitTotal"] == 500.0 + assert byPeriod[("1020", 6)]["creditTotal"] == 200.0 + assert byPeriod[("1020", 0)]["debitTotal"] == 500.0 + assert byPeriod[("1020", 0)]["creditTotal"] == 200.0 + + class TestLocalFallbackCumulative: """Replicates the BuHa SoHa scenario WITHOUT prior-year journal data: the local fallback can't recreate the prior-year carry-over (by design), @@ -134,9 +179,9 @@ class TestLocalFallbackCumulative: def test_balanceSheetAccount_cumulatesAcrossMonths(self): entries = [ - {"id": "e1", "bookingDate": "2025-01-15"}, - {"id": "e2", "bookingDate": "2025-02-10"}, - {"id": "e3", "bookingDate": "2025-12-20"}, + {"id": "e1", "bookingDate": _ts("2025-01-15")}, + {"id": "e2", "bookingDate": _ts("2025-02-10")}, + {"id": "e3", "bookingDate": _ts("2025-12-20")}, ] lines = [ {"journalEntryId": "e1", "accountNumber": "1020", "debitAmount": 1000.0, "creditAmount": 0.0}, @@ -163,9 +208,9 @@ class TestLocalFallbackCumulative: def test_incomeStatementAccount_resetsAtFiscalYearStart(self): entries = [ - {"id": "e1", "bookingDate": "2024-12-31"}, - {"id": "e2", "bookingDate": "2025-06-15"}, - {"id": "e3", "bookingDate": "2025-07-10"}, + {"id": "e1", "bookingDate": _ts("2024-12-31")}, + {"id": "e2", "bookingDate": _ts("2025-06-15")}, + {"id": "e3", "bookingDate": _ts("2025-07-10")}, ] lines = [ {"journalEntryId": "e1", "accountNumber": "6000", "debitAmount": 99999.99, "creditAmount": 0.0}, From 60d50622048adb58f2266fb61abb43185cc86d58 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 26 Apr 2026 18:13:11 +0200 Subject: [PATCH 6/7] fix model registration race locker --- modules/aicore/aicoreModelRegistry.py | 94 +++++++++++++++------------ 1 file changed, 54 insertions(+), 40 deletions(-) diff --git a/modules/aicore/aicoreModelRegistry.py b/modules/aicore/aicoreModelRegistry.py index f05745ac..1c50651d 100644 --- a/modules/aicore/aicoreModelRegistry.py +++ b/modules/aicore/aicoreModelRegistry.py @@ -38,6 +38,31 @@ class ModelRegistry: self._getAvailableModelsCache: Dict[Tuple[str, int], Tuple[List[AiModel], float]] = {} # (user_id, rbac_id) -> (models, ts) self._getAvailableModelsCacheTtl: float = 30.0 # seconds + def _addModelToDict(self, model: AiModel, connectorType: str, target: Dict[str, AiModel]): + """Add model to a dict, tolerating benign re-adds from the same connector.""" + if model.displayName in target: + existing = target[model.displayName] + if existing.name == model.name and existing.connectorType == model.connectorType: + logger.debug(f"Skipping duplicate model '{model.displayName}' from same connector {connectorType}") + return + raise ValueError( + f"displayName conflict '{model.displayName}': " + f"existing name='{existing.name}' (connector: {existing.connectorType}), " + f"new name='{model.name}' (connector: {connectorType})" + ) + + if TESTING_MAX_TOKENS_OVERRIDE is not None and model.maxTokens > TESTING_MAX_TOKENS_OVERRIDE: + originalMaxTokens = model.maxTokens + model.maxTokens = TESTING_MAX_TOKENS_OVERRIDE + logger.debug(f"TESTING: Overrode maxTokens for {model.displayName}: {originalMaxTokens} -> {TESTING_MAX_TOKENS_OVERRIDE}") + + target[model.displayName] = model + logger.debug(f"Registered model: {model.displayName} (name: {model.name}) from {connectorType}") + + def _addModel(self, model: AiModel, connectorType: str): + """Convenience wrapper for adding to self._models.""" + self._addModelToDict(model, connectorType, self._models) + def registerConnector(self, connector: BaseConnectorAi): """Register a connector and collect its models.""" connectorType = connector.getConnectorType() @@ -102,51 +127,40 @@ class ModelRegistry: self._connectorsInitialized = True def refreshModels(self, force: bool = False): - """Refresh models from all registered connectors.""" - import time - + """Refresh models from all registered connectors. Thread-safe via _refreshLock.""" self.ensureConnectorsRegistered() currentTime = time.time() - - # Check if refresh is needed - if (not force and - self._lastRefresh is not None and + + if (not force and + self._lastRefresh is not None and currentTime - self._lastRefresh < self._refreshInterval): return - - logger.info("Refreshing model registry...") - - # Clear existing models - self._models.clear() - - # Re-register all connectors - for connector in self._connectors.values(): - try: - connector.clearCache() # Clear connector cache - models = connector.getCachedModels() - for model in models: - # Validate displayName uniqueness - if model.displayName in self._models: - existingModel = self._models[model.displayName] - errorMsg = f"Duplicate displayName '{model.displayName}' detected! Existing model: displayName='{existingModel.displayName}', name='{existingModel.name}' (connector: {existingModel.connectorType}), New model: displayName='{model.displayName}', name='{model.name}' (connector: {connector.getConnectorType()}). displayName must be unique." - logger.error(errorMsg) - raise ValueError(errorMsg) - - # TODO TESTING: Override maxTokens if testing override is enabled - if TESTING_MAX_TOKENS_OVERRIDE is not None and model.maxTokens > TESTING_MAX_TOKENS_OVERRIDE: - originalMaxTokens = model.maxTokens - model.maxTokens = TESTING_MAX_TOKENS_OVERRIDE - logger.debug(f"TESTING: Overrode maxTokens for {model.displayName}: {originalMaxTokens} -> {TESTING_MAX_TOKENS_OVERRIDE}") - - # Use displayName as the key (must be unique) - self._models[model.displayName] = model - except Exception as e: - logger.error(f"Failed to refresh models from {connector.getConnectorType()}: {e}") - raise - - self._lastRefresh = currentTime - logger.info(f"Model registry refreshed: {len(self._models)} models available") + + if not self._refreshLock.acquire(blocking=False): + logger.debug("refreshModels already running in another thread, skipping") + return + + try: + logger.info("Refreshing model registry...") + newModels: Dict[str, AiModel] = {} + + for connector in self._connectors.values(): + connectorType = connector.getConnectorType() + try: + connector.clearCache() + models = connector.getCachedModels() + for model in models: + self._addModelToDict(model, connectorType, newModels) + except Exception as e: + logger.error(f"Failed to refresh models from {connectorType}: {e}") + raise + + self._models = newModels + self._lastRefresh = time.time() + logger.info(f"Model registry refreshed: {len(self._models)} models available") + finally: + self._refreshLock.release() def getModel(self, displayName: str) -> Optional[AiModel]: """Get a specific model by displayName (displayName must be unique).""" From d505ffd9cddbcb1d49a88b174cabbb66ef906937 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 26 Apr 2026 22:53:44 +0200 Subject: [PATCH 7/7] Graph and data class falignment strict --- assets/fonts/NotoEmoji-Regular.ttf | Bin 0 -> 418804 bytes modules/aicore/aicorePluginOpenai.py | 11 +- modules/datamodels/datamodelAudit.py | 8 +- modules/datamodels/datamodelInvitation.py | 53 +- modules/datamodels/datamodelViews.py | 112 ++++ .../datamodelFeatureGraphicalEditor.py | 101 +++- .../interfaceFeatureGraphicalEditor.py | 4 + .../routeFeatureGraphicalEditor.py | 41 +- .../trustee/datamodelFeatureTrustee.py | 37 ++ .../features/trustee/routeFeatureTrustee.py | 129 ++-- modules/routes/routeAdminDatabaseHealth.py | 25 +- modules/routes/routeAudit.py | 9 +- modules/routes/routeDataFiles.py | 14 +- modules/routes/routeInvitations.py | 35 +- modules/routes/routeWorkflowDashboard.py | 554 ++++++++++++++++-- .../renderers/_pdfFontFallback.py | 145 +++++ .../renderers/rendererPdf.py | 6 +- modules/shared/attributeUtils.py | 55 ++ modules/shared/fkRegistry.py | 7 + modules/shared/i18nRegistry.py | 25 + modules/system/databaseHealth.py | 38 +- .../unit/services/test_renderer_pdf_smoke.py | 25 + 22 files changed, 1269 insertions(+), 165 deletions(-) create mode 100644 assets/fonts/NotoEmoji-Regular.ttf create mode 100644 modules/serviceCenter/services/serviceGeneration/renderers/_pdfFontFallback.py diff --git a/assets/fonts/NotoEmoji-Regular.ttf b/assets/fonts/NotoEmoji-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..19b7badf4afe64dae28cd64fc506b95291bb8b03 GIT binary patch literal 418804 zcmd43cX(7)_dmM!K4ogAm-J*(W`IBlsXz#j(0lK_BfS@qDk30ARm6&gj;K@{O_AO# zlu#8A8z@QLsl8nx?CbaQ3d0^H9gVk26$tC~4&#<-cd?u_TX`wj0is_vJ+Jwv45 zgiMYN>N9o}>F`new#Rexpkb2-&V8?7F|z7TYUDG62lVN08MW_eJi7?w4n}~ZSf}OZ z!hOo%;o~ORcIskr|1pvJ*RYZO`WW^RIP^?H{?~{1nKVk;q$J>Zb;M5`(P#L8=WlOM zCAHQ$BIWX^kz>cDoK5(I)FPx$SgEBXWJO zfqxrL#7^?%(i;9Qe~st!o%{{Hn7_qer;SL$8RwT+2{Y2?6o>FZe6R+MdA5d?&`r9@ z7x6`GG%ICXcF9g|!Dj;ZaF5hY>c(a=6C1&r@@)1!%VJ}2{|!rI*H|yMm~COpSr@jJ z-C<+db@q@Ap^w-xxsqIof)r#eSY@CKBru+SW`k)zo5nh_z4RTp$*1k?41Gu+vVA;@ zcF-N#&En}0&tes%IR2*`Dc43Ri$kCNXzADaJl>h@2me}b>uF00KhvqroCNZ=J9r88_9eac>A%h*}e z>EE9`mIoekDT7JOK~I-|YEuq%p@;MidyCa6|4e1eX#q`T1$2xt8pIaTL9~H|wPhZ5 znwGGY%tSj`HuEzpTY}I?qoxD_e+qPoT^6E8SojY&>gCqrgo(VvS;Y zb__DPO}E*bsKErp*}>Y~LsRhlV?%=gOE$44iDegA2A*fwGi(;MFTAE4|Xbu}o_Yty-+R-dlhlaBl)+NMQ zDt%2WXeHuIr#Uo+9i>-jC3_k*?o5YSKia^W&?WW`En_E9&muM#pOfq)!i(s4HiIpK zEV5982lOUwgcdZTp|lV(9tdrUL=8*8TO-P#1nl}@qsEQh{?*474JU(;mh))3a6`OpS+pxIT}EPB8)HQ-Z|y-8oQifEZ0 zXeTRg#d4rm9#%+$r~^IEU!cXbm&x=LI`jq9<$bC_7tvEyI#1{5Ds4x*g+E`>akSsJ z(2}heZaTSOmH7ZN6|8a!9hHF)M2!imSQ4W{R29NezAaFN6#F|qghQ_L;LAt zNHK_>rhPV}uCKwqeu5Nb(7r72zm~oV*#tr!-_i?c!5vgachO(pqTbLCiN>;d>;-(P zK-Ldf5kEr>z=;f9K24?Q1*hmeR*(8iF6i-Qd<1<#&+?gU3L6aVT*eB}g7c^g>kUaH zq8APhe`q6*K%W4OZ_)+UlEswP#VD|ChL~5Dtz?? zO8SD9@F-Y{tEA1|h=0lSI z{tTdw)EZiN27NmlnmGWUMrfH%kW32f;z2e>S_X@D1#NkOenNX5V=d?^8_8y|c;1F@ zfPMKCHNC)h@D2QJ>JGX7g4nmimIZCs{l9$XpbeHFFRhiwvb?f0)R~^eXAOUY{YYIn z!+mGEg;way`jnkQt8JioTFhQ$Z-;Eqa@d{r>>Y6O9!1j#$oVig(xs5(H0j;qiFBJM zuuf&yp@ofU5;aDNk8Rf^SpL6I!!~H=7a)(SRFzd{AEK-n$g`NPvkH)AHRgsc+=Kk< z!D7XNs|?7g%Rgm=+vy9IfRB?Ua4W5bhW*atSv6RG1wHj;Jc*=VSuDPnXxAY`GApd! z`;fZ!$Kj`FBcTh_h}%sh^H*^H4m=o@l5iD4iIj{!wTjl$=EPcwO%kv9l6*CMD}5W1 zW0G$s-%h@h@1SNS*iclr01^Z3^jJnEm|G3y^5+l6?1shr2`G$k$OfAPr5?v-6D`=RV~ z*{HI?W&N+5y>j$Q=H*+LFI|52^0-UN#p)MxE@oXAaDLkPap#Ag?|VM;e6>p*FLk)o z?o#98hs8IGzc0RAe5v?i@sZ+##oLP46|X8@R6M_UR`Jy0!Nq-wdlz>v?poZjxP5Wk z;?~72i(3>oFU~KnR-9CvSR7j%Q|vAF6dTW-Klk;yFVAg1TX=fR>0YNRpRRDK<*B0M zza0OWy%_d|LX!Od@0VF)&6HTq;LUQX5lw$1%E3Vs%OQa#mqYUkGk_WZd`M_Y7&g%L z<*{Zq}9qnef?T}n}Y5FcH_D^ z=zicUT(<;05r$^X{NIH^Z9snw16~;U{{kRW_*Ue<7X~~B@;?d#BM|w6VNfTc01pEr z00opVV0S5?2h7N)D=6Aq1N8!hu4tg%ppc;k>H`Wr&_I1bD}(``kOEl%mMqJvGo%pr}Itywea+jaPt%gKBvJ7>g+I60ic-BS9fc4KxPy z!!T$p=$~FPzdKUk&sOD9YCW&y_MV!k}rO z1!2HbrA(Bsfo6cBA84SNM4~JVjP&fb90C#S&+U^T5G^A1=O|z_;x7iB1|UCL3W|6D z!AoU-E&*P}^-G{P0OX5N8pA4p_JcRZ{)!HP=rho&ArR(m?5|894{>&ZRtIWg-1`Z% zbqK_N1#J@o!6#yOU}5fP?YzCz22B7Gk+3xT9a(B?oZg#QJKx&tKTf}-ws&{r`dVRxPhfh3K`8Nf0K z!w0$w*pKTR&;ub5>k4`>1mgEV4~0Oi8|dK>h--Ns34vI5(C>k3$fpPB^$>{v4f-Q+ z8{vJ3-37U80B@+rT_a$^HRN>H44^I8a8R@bKoaD17i|HMR0xW;xC{BSQJ|1NKvEGX ziR0}7emML(75fqH?82p*9>$AuoKr0K|cn5!?hmt zCU6VaC7{2DK)e>{A0ZGAg5CxoJYE2b_5g?rP_)6_zj3YAL(8`m*Y%0vF}sICr6!>8 z3IXDcL1jRbKlJ4udBf`9@ImcYvhvpgAEB{{S>M1d@7!qW|2l zi})XcLSOHr@AFSVXM{lfE6|z1EaWo*^jY9J#5n+}wZ|NU_X2%B1mcH5=Y~L1Z_s%m z5I+JsA9w-rwfZdxf%s9-g#c{0q}6p%2*i(pE)IdDKA>7#!p`vHpvyxbsW0eDArLH+z|rtE1)}pj}dMJ{RG&J ze7*33@f(iSNdaE#_fr9 zTi8%zV_+Jv5jY259Qm3ufO)`GB8v}hu!a`EWB~HBeoN%oOXNg+7p`4@5V_G;+&MsN zU_Jo3xlaO*;5wtj=EO= z4ATVgnU8)@s}MkY7l6mwdx+|yyt;jf3TFU2i0W-1YRKTOqRb{Jrzzyn47Re_hj3NT z!bR;()C&EkZ9Ss)XtVa0h&sGS)X@c?UY+1ib^4B|b9G=BfUqt@fu(RW(f_+80J!hA z9ykU3P1HRO=m5M<)T24@1yN7L?fEra(A7l!n!vSumuLXW9oP@<OcuSH2$Yp6W0Bx`ga$5%7cu65z(V1vvE27u-5xvog=uMmt zC3@FL^j-k~el}GgdOw2bL*)4p+I1Ubxig38(@}5%n-cAT%=b-&dk22MNhJEt0`vfm z5}j@VK<;Ny-?ImZiaWy9+KGk6`T+92QbcqW`f%-Oq8}lPUy%0Z38Fu06WwkA*XlUY z-Q7g@J|_AbVGnYN9-4_BftQjcM5SnZ0sF4`u`SJ7Km_@A7MPf}+W;4WZahF(Y*xokRiM4%1tbG?^9T2zkQn+w;i1i)< zcdQw)zE2bD4_*hx5gXK;*kC*xiufa8L&t3*HW6h`IZ13f(#}M^p6f_#F6y-CSz=2M z5?eNk*ou#cy#c$u>K9_~tRuD>wsIrVeNc_qhc6S`3VydeAhu%$wyyRQ+clNgm&oHQ zi~|Qy--8(2j$v#${*c(IH;H|Hg4k))p?CqY3x5#1e4E(!KN7o&y4;8)_R}Os82PsBU-B;Kte@g66M_sS*S`#a)&=MeAzA@M<-;Cf9VJ`80JN1h|oh>u2H zCV-F0sK?XDdn)QUqXqGqZxesE7xCHPea=h7pT9$V-Ui|ekoQ9Hx#$_~g)7h; z!}v6k)RrXadyr%qN|H5^BxgKHp4Un8KEyQR99;OlBqg07DWxG?%WEWI43R32CMkUa zrggZ_Gy;1_%0_t3dQ73dC8;LD>LZWFl}KtjpQL8n;XYp=sl_WKwfcdiHgn4mpQS{Oyr zqB$fjzDUwibenSZqfmVN!t7)NgttZTR$Udr%KW%SX=%Ke185SNqb{R`T}y@UxlQDACYtv&yIdf z(s86cH3=vq=?v<60rJ0OBk6JpN#BG2tJ6rj-h!kbi%7bWP0~+(U=~TgtR?9;l=lb9 z|0|26JK*VVPm=Bf54w}|ur*1KmXcH&ASrl?WU`Zt)q0uzNizIFS*}L1a*bs56_RzW zNH$a_*@!UX2PDInkt4d0Y*|XO^-YrP6-jnjNp>zE+4UUB?iWe+O29pmquwDo`YV!S zyOJFDfaHWXNcL?bIjMx?lqw{rB2Pc^57Z^O;z*J!y-jl3d6KIjJRRZbzmuG?kmPEO zNX~3Wa@H)8v)?B<=XsK=&m_48;&5k z@fMPsqWl&prxkc?bBN^jc9J`GBDpho>avRDt|+%hDan0MPQOVc_rFW>!2Ki-noIIv z@b(ne{)ZeOdFW1(haDz)_$87@d`j|2+>hE#^5|V8k9n8mv8eC3Z%Lkj_>*!-o(x$% zy@BK@>qveE`A)q>^7H`7Gy0P}3o>}N0m-vbr{~@yc@B7(t0#HhBa-K%?hDFDUU-w_ zMPo=_yoBVXsU$B)-YZs*Gb-hvNsJR zdGi+}f7FEJEz3yW1{rUEk>njmNd6dY{29_i2j$)1d+%bB_u=}Bm*MhWBzZsNb^tsb zu1)e$3(3b2A9hAQ3E7>5e7=5#L~3 zK855TQ06tr>qa$_f6gNL7YE6|;@M5;%I~P#Z5PRZen9da)c@`olEns6=yg)C6R*f~ zNzvUT#k_(P%M4QNElKevlM*wJlsM4%E~F%GCdIdvl%xZsq~bc@Bc1{~KXhBNm7o=oAO-k+(QmTJQO8#>HKkhZ~hq%=nUO)rqrB8ik%iKMi# zlG3)Al=h62j(?NVrIeIz7E-!TA*II|Qu-VvrEfP<`VS;!;C)gCPaqBtql}j#NmyxrApOl@i zlJe;>Qug*DWk13Xb|wX5hjI*ZIgUD?>O{&n!$|qAGVl{Ar~8p|raEwelygT&IS<}0 zTqEV;%cNX-o0KbEN%`STQm*|-%8fKqejZQCuOE|gOGnBd$nQ_oJ zgp*VYo>^s5ZJkNAuO!v+3#sl5Qa$~&^FHuNz7=Xsym3Ylj1LZXLUctXnPGz^0|FdvNMDnpVJ3qNg=oyZq;KQ*>ur;^x?5$ z^r)|fiZ|KwUBq1W>Zl{5+0wCJjb(#M4zfj^#2D!lQRnmE4&GG5=WO5SyjAcc4WElT zY#$h0F3#$ftJle-ZG8~gZRAS~edjvtHlCcs?KVf2bWVJZjs1Gqylo)95T70UY}b)P zJ9iuwXK|q7J~y*R!CN9C_?OUq$7Zt+*oSPUm?c_>Rsw#Uv_{-fXF>)x)GOJZotu}J zo2RN)9$;DO%)f8~h%;}pqs!6RJmQ47BVVoaeC_E4Rilz^t=6n(D~a)U#UHxo)XNYJ zM3}>Ja~(O^{^TT87syS?N+h>SRdvkm(>+%b&%SSW`MJ$6&HAS2f_W_YCG2Bo_g;TS zOqV_sf1KvVE~QLb@%omh+P?erXmR=8oV&Mgh=K4i`D+lvLiH+DL=kR}#|<$gr37+3 za#m(;PIihfIVW`S?^2aF@05z!;`NurgYgrYXU5Hk;*lubv7`9T=dUimGGN*ymb(t8 zMhdP-M7vnM+yW?Eg_KhgP!N<JjCh0X zWZRbP-uS_9m$q#>pkB3_zO&ii7MsQKiDSg);<`8_RH4eE$R8)_X|#nQ@>&Y z!55>$y#i%Z(U4FUpPNZ9OH#?-iaw}^9{zFdz=OM3CVC6(&V$-}As5U8(Zf9sw@cIg z97x;m&&j5&Of&=<-yaC5;MkLe)edj27NFHY*LdyTOGm|eaTVNd-+f@qVpjX!BUV#P zKe6RV#Z4c6@rgK4cfx{g^EbV+WTQOrk3QqXgE#MsuUTAh19S8o^I-7>CJXWXqjx`g zciP@Q&bHsJcw>`XGQD3>yB*`^EnPAnGJPFPE`Us|b(E&Z@C5hBI;n;K3>!Rckn)m##!Eim{Qk|0+c)nQXKjYlb~_GF z?HDr=F`{vFXOyTU`UXD;ek#9n`pUwuS^6zZJ8%rCzu_6RKe)hR0L%2a(chANc6=o# z`E%?(eC1^4X0dQUK$1caK+9t|p#fQ$UZ+zd9byTcvj@*^FmQ){fj_{L!tP`dV>c$5XYx#dN`BEe&YtrD7{?EG0gZ!(w8k zLE$tQITy0YVztQo3|439IjMrvXu(P$7vC9D)xWXjo1kr3>Ny%&vv68tlikl#G(Ckz zqN}Gka+u2tb5br}u6<3pWE~;Zlg`UQXpT#v1DemB`#&w(}va#=(uYaj% z9xN5jf`V-;4YI94(U85dYlQe-%)^16#5u+7R2*1TRWO`|HaymaJIIO)lhtbC_76%HP1;r1t4Grl zs%pTf7mHfQBfSHkYYZ|#$j_y0J8InwwbsPAy)^ixYBbqRY=L-A%SB#qvOnB3?R4h3 zfsLB?)rl=KCz)R=^XQslmJk6|t)T6Av!ooUydG#m9;zj@RZ~`w|HC6G%hphDWOMY! z`m)70azu~5!-x0fMXIqpv;nt14rMni()Wr{r!P&kh+PBv^d2<0SO31Oo?iVg!DyF~ zGN-HO4%y1hEJ`K<561@{Ku zX3l{ZdsZDWvPZ9xL;9DtK@IeJHdk+KXw=6J8{NIvh`|HckPeeJt{&RFVcVii#+x_1 z{?(KnANB0n;E2ZkfW8Czu=+Z1ubND1X`-qVpAYKQYw)1n{raIJ8O8U8c=aySDn>h- zp_zgw)y3(8#z1kPNbb~t55@@k;?-U2Lar{%#>y-9g3BEg|-P05DGPoInT z4MTO^LyDe^b6NjW(K(@W)8o!fIV{Vb>`*nc1swqUhN0pNbnJ{ct$njfoZ}c;B^;2f zv$mFEs#T|;c79!UzC4A*(n9Id!Z~40?X%J{5{-nG zwv_EEW4h9P()Ts1=T*UIRe5iXxQ>fW2>2e62P5)cWlbouWRP`xpLndSDLKJngP*hZya;D5 z|CMuUDkATD@_F+$EEe(FBYt8i<7UWswU%+J6I={_;&HPlIl_odB4`MxO|)}6EHTAy zOM&|XJFU5;AwkKz*;Y18EE6qDZ%Izxh$jWl2X~2ggKzOZtmg@w+T$l!Q!!DD6eIBc z3){u^2>1p6$~YwU$C>&PWxGxGsRwwU;9JOOSMWSf;*BI{=`GPxEMvn^U{4k!3Pn8_ z+<7={yM)aXJ5Hd1gD1$!)`;KqGu0qCPN7_!p=pWpCIfMPV;W9RzKnBuo9RMwU~lwO~=Ev__5tUI-zbv}B4 zb^q#!Si7HBED({+vZ}QZS8K6Ei_h-#Ssc+GS8S}y70U-jN4uPHaZa%)&gqJYb=nhx zD`R626zgIRErte>8E5xIC)n+A9dL^juGs2vksfDUjGINrIuQx!SkdF?EETC7S`J8~ zrE|tfFNkm67Qc2CZ?Qfj*^rY**}(CvpLnaM`0Y*JdY@Q*biY`C>k=C^)?;E^;`eBeJppmz#5b|AS}NC21Z$6;L+aQlw=-@{h|yRa;$$V_oW8PdG!A_= zq}D_V<{oH9&8fmf$>B)R#$G=UXddZ5BQmB;nnUVwFpH$~ah`%1N^WkR&*Si508w>H zJoow#kf%FZDX&TPqr=%vax1A@S=HFHpNy6&{z!QRS>AjbN({;(~bpt?JeWUDt`zY-`>2<9}Gq%&giglgG?^C;0hhQMPgZn5_0G z9qSDH?Cm{27bL?R-lXP+e5HeC9GoG0$Zkq^WRZ$6R^?yBE3p$XbbJ8BXkA_n)T@P z9vQO=u5)@cuQEAR2{)@#YfVQ=UQRORCs-B#w+EUURsmfgG!4nkvuE+glMeV_Pj<#9 zJNT2Gm!32Tq0SpJChq^Qi~!|^S%Vu~ZfjsAhN7k(;@tO^rUt-g)G-J9J{J?SX+RyQ zS#&IF$i2DO*}^|9Q>RWXz0CTZKqZfy6mJ|o$p#!AX|0`<+vsks`!JD7HS9bt*bwF+ zJ{I+h$G2vGxMs|}-WazvL9-f>9&sba?TFQQkBp3WxT9j+ViW9&JpuOT_ZXKgG2ZG- z5GTBGPETa4Ylvockj)&;Df`#G||^4T_$XVV+7wu|7>V0_LeuJ8Sv>&O9l;^pI_GIDHO>48taKD0j#-VNnC~ zYQI0=Q)RbZcQ`GtarUDlnw`?^WNFc-KOHSs%x#jb)#p<%Bp?L3YxtqHg>jo&zzVVas2zL;z~7R*j!a9G~ytAs}*^! zhTYb&tRIYpRWh44&aPZvFD{8quUB`}@3BUlx$sf&ofk$=UWs%3cJY_^dS;h`iLZ$x ziRoh-Hy)eLE7YHy5o1Y?47|1|c`hDlw8fVF1uYwE6MBFLIo)?BNM= zt~R~8ArdgXx-Mccy^6r}Dh`v&tIUh(RSWo>EktNWh^eJ$7-42Ttfm_#3#+~|s?X4gkOyvHw;KC5Cg z7ms@3QRmPjJJTMiogeC?saO)3ApX>CQL$1S@`PD3V_61CwoW6+HGpsn!&0K^7m}5@BOQbS{_fDQ1hfZm=_ltT#Cr zpQej&7}+~cr?_s2u-i;}xdKNFRTaP64NDPZ5M2@PDamRuSfwF&bj@JWi9b};V$)JP zBlL5`4TI5WV2MhE%@&ar95_db3l{1L8;UaG1r4ACK??r%A_IY^XSsv!~8t zH%#ZY2y`^@yG1gHlNyUC#c~RT20d6WG25k3Da#G^UyxWXFDzQpnH{W$J_2h{(av+ph6L(0~VTp&o zle8v+bi-bK`8Oo3Er;R(zO^J8(P(lK9J|n(V`g5a#}i(1dV=KWY5cl%~)f)7z9}yeTqQQ(NH3v7# zi)oqGctZT0zqEVZ_;Kr0{p6P`BxhD% z_B1=WWWh0Y(q@;vu=%Wk@>;RIPs5D*edJeKXLd{N^_inpex>w2^ZBb|E7xw`bBm>S zLsHm?vMl|l>h#duJdN@QCv#vRl9N)j`A-U#Ivno*X(peXygYf0Js3?mA$9ym8gBik zC8FiShoWUk3ET4UA=^?i{-T&&d|J%8dXY`Oc!f&6;i7)u_>y&wYHwy6?R? zc*rkbM0t~*IWl$9kr_#@SoZxBxoEi-vu7@z7SCR}D4xG~l|6m>LH~aB8}#YZpk6r@ zz0b66{as5}?DM_4E{O8Nf;BB`qralg!NQZam>l?Lofw8DykLyA5S>;BtSf8F$lB_% z{z_)m?~GAvM~z)AZoM~V?CMDKnW~wr(esC<`zL(Pmh$9~z5Nr4B zW?c?`#Bv7=WVs*i6lIL<{7@VbG8VE+cWlSHAIEVG_I+7PeIp!7jMH9lfnTVzW6Zbv zH78eR$E?JTQQxj@tfb_Hy~Y4SHKFJmRbSq<>+6 zQn2!tRrZGdt~wO2`Xu3%A{_LRQv%M==FWfC`mxpPBdwE#yCXJXG%fW?8t2#^K(7v& z<`k`yYTE&bC^ry2ugu!SMs95|zg~|~%>MaqW)r2NB1AW|Z-a(?`!;OY=g~IIRlY9! zmVNfcv%iavS2Ubo)N8o7{rN8OJ0IV-e#1U}8aC+DuWLW1Z`R&ruYG*HmdEYghs0;Q z`P}~f>lFal6HBX4|bLt>3R7&f2gs;+TFr#>EV(1uqTV zMeAkd+hlHUyj0FSv=2HM0KxKy3yTk^Mjky;BmMT#bN82JFW9np_HM=lookARAAc$y zG5vO%jj>@tyo}p*PwVFCfs#+U)T!I0OWitMerd*%zn=mZYuGC**YBPkm$qo-{=uJ! zhs?NRC)3p&?2inxA!4n#eNMz*?msOOqiDB+I^DYAI1>g!!a#V1s$=1V;2UPy(a*8p zqQh!vc;zs}+drL1y9nvZe=Mlm#+m)`j-dJ=Wn!nLCni{oUq0wQ<77uYFHEaaD9#tA zrPmSXe_!2XR!+{WCM$ooW#{j_ct)HVHlna{mAd`CvEqDwXQz2+TjovcRxN8ln)F$E zojU2I>!nVQ>6%*^l`ChQdd+%yJJ!By@gIU)$FTTMs@ATBdVEvXSKmQ>pAs=E#8QtB z8jYSF>T1w^pIwu>X7x0wt2!q_^c{lDXRZ}x+qQ|awKIave=ltK1#5c{>yIBcF8uxB zR9;aOv&_mj7Hne-v0nT!O`KqX?q?@_$=Vzgt;OjHXS<7D;ylBtI+iGYGU(N3Lmkcz zQ;8MV|2w&b7V@*~OxJtF`umNtvKoo=jWV_GyPKNN&&!?La?`y=)kd&&;wR?5w^3@8 z)u>Tc=?rN`X5+@08r^g^yK2?!`x{C>$!yF#qBA(SR<=>!K#d6P@uGH&=@<(zym}88 zQQtt^3@&BT7g$qW{Uz%rf?qO;j|eVRBT7Eks1_iL_wb0x`$UkBU@pkC7goy;t7C9H z8iz1bAWT2@;gap>cK@oK{U380G9%=EvV-amiysa)n^UvaAM-oUn9;e*3~_5l=fOMb z)!#nk2usQsQu7dY9BxhN+-1s?E}f_RE`F$#I(aE`&FCyW-+9LLF2Sn&>&`Qnt8K%E zZN=py;)XX${BWq#w5gptPMz8jJT9i5`d5_Rp+OEOJ?*-HlB%TY0(N{W@K>I^P1U_# zs=Ft4iQU-1+;z{y2pj74@h*9t{}zfj^yj}X_8Qk;#@~87%lYWal`C*&_82$d;>rd) z%lklk1leyXYp0)}&Ly0^Wmp69`mkQ)!(yW^*P)#@(C)NQ?M@5TPY@s4Mz#~5H|=c} zaS^?nirp>8+Qc4KXdBs<6}9Xh!LFIRw`BD@54Ez^yz4PymnD&i;Qm@UHjDz`^3#(*|NPE_@NL*QJ2L|63fN3 zkN1e1HFH*qpiwa?R*=roV@9~+C`6ahnB%9`s>s!J#{>{5djkN3ax zg0awBJ}_fs)PXIG%qPXIF9vU~>#iAN4e?ah5|vUIUAS$?UU7@LcPAx!qL^#n9_C8- zCGKTz)?eK0(0I`L>WQ*ZvAf;&h@u@68h2o>y?e2uTXjI*9_ALe_6_T$ikq6a)|UOG zpP`(`P7s_T3Om;UKf!1}=^o5DD#m?VRc|uusvZ_2PBF`_IX7~v|M=W)W|sPiC1UyX zSXGUkh`qqEAAH8FFHL@Csrd6VWPb;1(mCom?MR0QxnIz}Q&KTDGb2FmkonTP(Iw*`>#?VT*I}u+m358kSN|ne zIQiKF!wg1?GISIc{fE7FEti}1dIi7cPc0C+qJJmmj|1ftVfcyb^#t;oqv=Q)m$ zY(tAGVWE56OjjO;$C!FxJ_wm>F|-9^O$*90oc30(yX?@bJ3BBMiq+auNUC>=@*;cWKG0MogA5(kQbYz zui|hz>RDr|nAx-O-ir0+*H6l*S~TfI_{gYfE-f zE6;eB-IF*s;?RK%gW`&9@WNOoxr}LzhSlg-dMjq$vDO{=uBbPjcH0bAZ-Or- z!mR2Q!^lcek>ds=#`+T4aED?0I3E_79*VE^Yt(OXrbgRTqSQP!QZv^6RVOQ>N^fY>vTmKA1do zwaZs0Hkr-L^cM`Q&?bG|;*RZ_H*7QP?T`8lONy@6Y}lx_@n*%q;~O3uT*s^=R&Q71 z#U`beXS;NJc4m*xvp<%<7B^VKtA|)lQsJ}$&wEcj{lUCJsx01Kvv!egDz>PfU0!`j z`#@C7?1YqT)mqV_T6#9liZ2*fJ?=zho|cr|TzxcaR=4i6W_8E-Iz;@g->t63>&?%i z8=!vL%ves*R1KG?a9H7>aq)0wLykENXl@+|dah|X9%xr#mDltVB7h;$#1Xo|X1nWg zrwaEDWUHk^Tu1oyPbgDQmVS4VBi#{c8uZ~!@j#Sa-&89z5Q&f^pW9)s7?sefdBdE@ z1SU_akuB@uWd7d#jNTIqJ)XE8gHOKr;)!RLNV43#Z`bUpgBvR>MyF)gsQYGhPi$6n zOjP{LKJm4>m%iAtW%cR+gKu?m|Aef{gVzk{FtKWK?5G-56H=;W)paE5?3v7M+G$rZXKRE)4@yxDW+2b*V1TT-Whh=d=WL z=*i;=h7tPY$iF}GreKjrA8YkFq(!B*OKVHR6kS!l#7v4VLsdM@8y6KdZ_?lci!CzA z7lpO)EQ>Yn!Efr!htqkAPU5Q-RVN3#NxBOSi)uA%SXi$CMzLe!s=hmp{b}bRwU!Hw zVj&a5LTgoEFWjO!rf;DbjSt_Kbg=6ZKNzf{z#ncS>r~tZYf9XxSkzKgG8!a3CWJT$ zs57b3`S5)y7WU*y+@=WW@i9m!R#CW9x5siVeD00l6u zKc#4yYhy?t8M&mw-_#1#iZP^NJv&|tlW9nPdINDg|%&uc)ws|wW{tZtpfcE zeCQ%_-CYkq89&hFcSTo{tM!ZXnwiyZX4U)Vcub})=f-fmqu#s;5{OKE=BAqJPhsEn+@6h6^gEouwPo9aw3RsW>F(col|!^?3UvI@Vhte>V)$8qbrqzirNY3l-8vQD}Ikn@Uy~JuEV2P%KDm}?HzX4wfM;a?b{FSy94d} z2|q#P!*pZ-dmcZuwi`e8c9%PNHgC^I@)!A9e(-E6(^Ljp5*o8Yx*5LykD_KGPpz+ea02piETj*pn)&u>Ps*Q% z9>OOIWgmcgYtNv(aGkVA9vPl~a#HzXEFNjf92yjKAw>*K+w!{!d2GBoMMh5b!@S zLOZ}=@rJ{~b$4R~44Ip-l<|vB;X=dm2|o&n<)1`(sJh3k3)Mflyz7=1Q!Y*~YUD28 zQTQ*MXl+(rU2VNVOB>dNa1oGJcrel0Llaj{IZKd3xB#!?v82P#Fv{>8S|mj5Fi6!3AO{%~u<7X6A&{FC?*-#1}}ZeV}SjqlhAdYuw$ z*Sqy5n*;q+wVCXd^%jp+Z!uu_a9b+q9Co8A)#No8OnSXpGT9y0$QTI+6V>O$+Rq0K z&^Zjc>Rh&(3=syK&uXY_!vjFKj+%WJLZw!k2wAbnI4 zPfEGRJ}eDhN~wmTt(5`>J9dOro6!>~`b9+8P<|zg#pbsdOlGUun(MJw_DCr)X%RZr z8e!6zQ(Yz$V=(IMMzdaLu+5H2HLBcdibzW}Sj;jeRTiVuBch@z8FaGEsA5PrCV5R( zgTrFu*i8m#OSEdT@|ZY&%4#sWovwIYlw8GXH|V8oo7)qe9-MDA z*|SV;Z?fNRkZmzhk&5cETVz$6#ffvGTuH2KvfEQFa%@6X&XC5JX0$|D ztR`FJ{lsiI(-DTWII|UVS5>l^oKdACEDpUr!)$QseF;{(QBe$v-Q;zeql__QEDn<+ z+ZmZ_&@}5ihHf(~-igV~)7wXt#x-%R?OZGF&1pP?8{)^r|WF}V$%2&-OawB^Se z)!=KI`tr26Nc@bd#bCxVz7>lhd9oKXS)&16!2wxV^ieju&S0|Yjn*pt4DnWjUN$(Z zbBoE0nT=gF$%?^|kz~`0!)lDlh@$LzMQ?O~8kQYx)`Gu$#;lFj_G_nv2WE3`LbB zgCkq98cnegQBH%&Q8n47U!|`s>oA)%DT%0-$6#|okx_VlqCqj3%@OAGWX>%NCzh>P z9+1t2E|zLBMnpt;>?TMChtqk6F}NBHW-;1|oQe*L$SWmE+$2rJmb)cFiL#>uA!fyF zwNz1^xU^*kv{te+&i#J;G=;)ih}ZNNbb6c{8c0vm9Q;fYe!C&G!TNZ$1Dmst4HmpB zZE(^yU$xOiGi2Jn4a^=~w7dW|ay5JB%?riC3=mcq_E(!CVpN26)FwCOr`rFugH}FX zTD# zHL0SQDz>iZO^>pe=AL;SwC?V?ucwYyB&+IOW8vu82(D&J_4V{s84Ek4rDna-`Q>wVn=4jV)g9Gq z$%Muk(G_D;8hSd_PG}Qdv9Wzx=YqDa>Y5u&sPNi|+1=DcOHsYI2NjK|U%68MW({Z5 zZRe@rt=mI6!c7KMI$^dQGMZ(Rsk36RsZE&$1MgQRN!g;0(Ad|Tc}<)S;S)KSm&WR> z2DQIzDed`=Wz2GaQbz8g@$B&YNyp3sX6R}+GuCT)_T`E#Q=;;H26@#}*}WpNdpA$1 z{^Ga_Pq7YD;};fnpT%DqS-EE8c3UEP)Wr<5MOlXaf?7b>d}7+m4LBYSFU6kv#5J5V zZCNwCcdN-*+s;vdkPoA|J9%M+Nb;+-_naj@$I#&cMnldyZmzvH>!*^$jD6kF8`GdtN+G) zwkSBMC%;j8RFR6Zy=nH-wN`Za=$3@#kuGHkSmb^Q?z{7Ux#!whV=QBr;V61p_?s~t zW7q!hef3cbR;)dGGB6x%7n>}e$03!);<)}5-N!iPSco0dCir>1?$jIe(;*NRs;qT_ zq^kU%Hvqz&AT--#=qpY;UYX0ua%Z`d?V;$BI}03Tgdt$7ECi9DX#dQfuq~~roMv*61A=kv8*QOoX|r~%T|(EP zgFvAO+U!OXb7)-)2k~@K$(fnSuKv&U>y;cWhDWCq#rK=l-|MSfEg6o>F{v)f&DJ>Ho6O!wTJUssw`tIiTPs^IC>y$W>0qP^}y zx81HwcBK@=d16>hbbg=r#}p2a#)yrV6=(msd=}F^R%h7N5-?S+`5!6LmIBr-XFn{YOSxQVS7V}q|;bF_VL9>~G(CHNGF!62$qVBx|n>dy~aP9TeNvGZ{ z*_Ld}a&NLNH*9dj#xw&i^lmzs5^8{i(2_s`Nhkpl5=a8+g_j=EcxmrFNFjj$=^a7} zqyd(e-|R`Ujgwctzu#ZKmxp{h-Raby@bP5tM{3bi9f!g%{so zzt1iUe3V_%(Z}e$aW%@?`6e2^g93s6+&{SqQQ8Z-iUmZ$_sHP26oWH$b0ta z>h$D}chpA7Y7MSPNKucjpFVN!l(qh*4ao@(a~x}kABC1x&yuI%bNIbQ_v0P-!znk^ zd^CS}t6i^utbi$UUWaq=kwOn^aqRdEHd6OyB=^$bEi@ybnR1cGn)Tc52i7beI>?e> zMjIW8b^|5IC6d`G>M}-6UG*LI<5!D*K+X5x+w{-Y!FA{->b2nRMszhGyGi)f(0!OB zI<3Gz3HNjFL$+XxYGj0EVDdoNiV}W6rYmG`fRZ2_Lk1wC#0z0l6DR{$hV?%16p6e5 zRUYvrN9HTh+aLiw!u>Zdn0rHcaYo0&M;BA&iym3A+)zEJptvqw&~2F{r@2bpGDlI3 z!!kU!G^3@mDYMAxOmDz%nDuH&g5YIOL2l?=@Dj~s?45k*>w}+Wbe!he7eD&&QamIR zAHhFv+}heuxeXn-745&>-K$WtXt>#le7_3i3A`+ix5eI$cGKRar~h*8hdS&$j} z@Qk{ftdiO5acsPLgq&oyCdORaGO+jL)vYySu156^bBx1jH#%v@?xB4Kwhzjjk%m9) z?~Ji}P3XjfnY9Vo`CxUqold*nonDrnQ0IT}qkC74*gig8FyFjr>?c(G+@$_aLxP^c z?`+?>b34kR$JYC1g+4qnZf3xhl_&}sqi6g){pKzVY_b%Kun>RRa=EAEC4FNtS5mKHpU&gYddM(9#9r=WO*0;DGoNafvi3s9X_R`c|^Yu;Q})v{0m`zaHdr=*5GA`5_%09 zk<%YhgSCx0)pezf6gAvXm0D5NR94IW^QmU{M*rxRCkDfs+S*vRvB~KfAZX%@$=b?6 zNhxFWnpR(Yi8aRb8p=8dRa9O;-{Lt(hunPRJkl<@{yz1LVftZpC8>FBOTKX|dteb& zG1$*iB7axi&@tsYPKM{SlH^mhCbluJzS*|BPeX0RE?$@N(tBlD;2lE?dlfZRhF&Vo zaK?zDrSrIY^n0i5$~;CE+HYFEPSQ ziUfr#euh>)v$ALKK2XDJ1x=z>?sMaynGjZ3S8B*kuT9IH z_Jo2&69PN5X65XQE%p|0Cg@giFD=r$__< zIDFt^c7y< ziGf188-xHL`#(1e9qnT`J7hgykW`y!TX;93`t@x-|KPes4Wmw@dSoBcGzO2K^kU0* z%ZkAnDWan6l!{)VbA>*gWojM0wyZd3+_f3y{kNB){dPN!pIy*UTw*B)TPii zXTfr(S)tM@pi%?4H(WrcX#h4)s1$xFV!FWmAf_athr+PRbRr}Rz*z`GB(nfK~@UD|5rgm-FHhtJ$1mOYOHYHnSzI z?WV^a>Bh|Y<@gRpG?u3=nYM&O<122ex3&)sJ>zm`8X&T+9|v1LYV3l%k#v`+nU8;c zYjB~Bku0o)tomM)_fCp6TNv~e#5mSb)Z(dqmV|b*;Fe2mN=%);ptK@KeMBfu@)g0# z*7UY-SmWS^`aB<(*^;Soo0BSb7np4uryMssg@z5J8y_A)uXH-}yV4(8^8l8@*w`T_5jiZ+{mROqhhS-<>z_UHo?Fr}f7N z_B(Oo>W>BvImMe+tUAlv18lsEO)h(mA1sjwwCp3J^PRIr$$#*UkzotWc&l(v8=jkIo?4fPxyMp!tW=I zKD&I)H=%!&S3L93j#ui+%d1}7dHZWM_?L{?1qHJ+GC*j~S^dJH+LF?mS6_Pijk?m3 z+P6X%QYYtTPfSmrkdr$ZGO>Tb4~qHRPhBfQua|@-hBXMWlx(o%i+m*t#RRpdK(djU z2wy88o;P|-#u~!Yh@gW1M`l|*g=jE}iLIsRK%VA#GASe9VB?YsKq8#-UCbXpMsBJA zc%5&i{`kVc^oFd#d%2c_Szf&%o)c<%_pTB6csKqz+gDZ6nw(pkU(@)-!l{sD`B_Di zcI<=+W3^2c$h~!ScJU9TIgRx>)w#ZrDMOo)-W|_}Z|C9-dZcgR_6*IcPailWr{u?} ztG6O|MUy;cJatnoX@1Cy+xxV-3#twYGkz$}UcFUmeB_k2QA*<|20sXG_w_eqrL-1R zJ~d_tX}h+fqE;YnCuJSXPN^zw@n-ff%$+y`KLB;5+~rN$F_R{Z(KeOi&$g~X%`^Bz zHTmwA-c>2t2Zt!HR$BoB@glxeoX%H}IwfRlfdN-TlAuxxi z|1fzWXa_c@6iUNngEb&n6=6DqAq%JiVUnwOk}Ly&Dbw}pukoZEBfq_0BWu*s>-fhV zdFimBrpw8`1)u$=q=sG0myQp+8&CU!0QG!KWJzgYEAfsO-)xvL~*~%()v` z0Q5=HmE#u|?LfP}R_m!xbh%k~;ZGlv)Pg}ec&zmRp7CC^jiJY1kzhwr{U);9nw=AV z-tEZxvKpG8*c}}scAy>K5Gbi*c*=vLK6=;yyK{bg^Sk76Afw_S5mDscMkLo1-hvP% zkwjqtOOm)~4M-}vo_^B^o%@Txs_bodDcP%C5#!7k8lOKm%Ifz8=i|i zL})2*MV{YzEie2cYPI6={XAdEi6RdOVdIe(Eg>g=+|E*TB`vaQcDpQ$79ju@K}bR$ zUQqMC<^dkIm8xJiO8A6EJzCUm=K#&7lIOSoOd5a+gkC%z|4tf)lMkFACr>;8 zkAjU~7HZn<28f7tf~*yP-VP^uLFBh9?IIukjpwT%n1XviX?H)b7Wh7pGWrMX!-}H? zc{?w#j8I8a?Dik6S`n{AKONicHs~K9A3)W)Tg#}zab0c@_RO4E^N+T1 z1RZYd6o7m>ce$D`;ugY$w!!A=KyV0<>8VVxWC(B!FWt#v9`Gw8_;6rD@^k>uPL=M^ z78dqVJCWnS7FF&CKLP#2s(I~eR?nWhcJR=~0mFw5Zf-;)@bbo;+o~EG{x&*kyL%au z7GjiHFy~*n_+MW)eLpVv>9jKwn#Vshux(-g56A3XzHHv?YnRU(ICRLMfsKO(qbVz< zJiK;ATV8Lfca{;=8f%AKI6ijkb$e?1#Kn!C`RqMQpMJBZ;9zFSl;Ojd?Y(0Y=I=tZ2O>zM4v~o_@a)@9(>M z&YZQY=geI_a9CqwbL)WSM#>g%razByCs~8>=J>(%BbRPlw!D4LHOuEU4jwviXurXL zkI$4}p3k1RL=FEvyCBXQ7&Vuv~ z#sj(^j9pvX*vU}O0J$$e1JUIItPr4x3Z;bB0L}?<;6Qi|SWqxZD)5KYFMbG5zCXpq zo2nkiTi#qyj{K)~BeA?3@pqj>1@!(`Z{L6~KGe*$@BL=ao~P%&hTM0pUw1eD{B?N7 zrpqnDc5WQOv?F`uVKlw4g+a0(qt=24+&oduPl6VR9V2Y_*g`vO9*^w@yW3+A?IMTN zB)i9L|3Nuyvw5h=@RKd1Od4vkjr>GTz#+^cI1MLAC#Y;;JG}u$veRseK3Et>nz}fZ ze9{|~M=8z2?%Afx)1d2(3VTB~diR7AMFSD52D%wF#G)-SWh+LETzSoi;VaP-D@Tr4 zacxWMDr#QWQ`omxT|0clHP?)|H2>OFt>l2NgXfY9-~)R5chzuVBIIIn&|Bi@hwz7q zfT?$c&Z9mGEeG7TPtjJ`5hzkM3O})n0aBIFmJC)TJDo(_MAM|nIxUi~4nwN)k?k!q zFIn?e6m#|3&+7!-vA1Md<+SPe%kNI(&t^=iTvoCdU*abnyXtBH$GQy)o!+=$!3Ne; zoZ&S=HeRWy9e-i-Is86K`(^Wm@mjGA5R09$8Q8scEndF;R>0{8^p7Fp26%y9u<_U! z?i(0*25{uCo^YC}^<{MUmqPt%82)~=%43X%76aZdPf($@94XZnZyzJ^W{M8fX z)7*&@GY4FsIvkhMFG5gP8RdQEC9{OKm|U&>_xyo8!HVCZt=7vy{$0*SN{b>7BC-?4 z&}3B^*mo!tst$p3apLZ(mp=h)!b72A>Wyc|-Cm9+rQ?V2mp!;gVIIWh(s{un!CTpJ zcslj~79-R{p*CIKz;ENy6(%Dr=0W_n=+D%PgxP2)g#wptgv&C>We`?Ru47G69-I#l zkI*nI-mb!Z#V2?u3k5yyp*_W=cDLL9M=ptu;8#bFgtzT($QQi|*TH>j6bbvMI|<^B zxRgwb@ZG}lKaL4${F?4<-0%m~-9URUzbmZaCIDbFr2QbWj8IT!z{*pcWh96v?2+*j z2pX%+1pEA~7xHIV+6x(@Yjn7J19BMv%-4)flGf;=Zp1YPhmoo}DaFfH_9LpwXoV1o z_N`m-T$DEs4MoGo(;q_wC*Bl#Lo&Do6u@smJ|NFf#i(6pG1=&B$`q@ULqBBG_E~N; z8{gu_c=7S$2h;a&`=Yp3=(9@mo z<6GW`e;<5+W_|EJ`~yp%?nH53nfl_^`fWHgaTNZwt#DS&)W*k0weGsL zbSN@yX-9oVWX|bNjjbtK)_dpfz+b$XtmhxMl_wN6X@@)n=`d>UeuhH> z<7yKERl32y_N%X|U-9FkLM@NaaHs*j`0Iq$w_{w@g=plx6vLl_$a4;rxDx8h0Igax zbli|p_-8;>+O~@k!O8a}en70|HOe3~!Ab-mi+Z37!y?<`R&^mPM+H4BOMHnO#dFF^ zn5<{P9Y*v&QD8)IDNE=`ZeD=SgXJ*jIat8@6~P={tNWtsY(u0S&?W&yS|Mshi7q3` z#iD$``d$QVQ>+D$?$59Ya0U+a2XX@x)-T9C2>*uj@E;JwQwTs?68aac{nY3%ENt(Q zbV=z`ZY`#>*xB{G1~SjV`l=nmHA`BpL~SMT(^GkkI@>R@KdIFitJUw(6gQnyYq%N! zo)GnI9E4LY<|!8Ce<8}U_^4hyq?VUJ-12I45PI_&N=D}(11bhSlf(Uk4jhIxYX@}c zJgg1<_*Xb{1R!^dW$6yHI2{!~`x1U`f=CU3fNqhS_Qx(+qT1B5tPTNiZe{+GseVKa z70#>G=Y?Z{-m2lI3xK;|C!M7XlQ@0_&<|GNURion)bA!Y?$(QsYPjV9ht+*dr>h=> zBC1EB5c_`{SC$}6DS@uO?xPYOULriYMkPE}06i7(DtSEd6bwydjKHV?=L+;L+*Y!W zOfrSfiMNy@a8(dwYXc)U03%Es9##vv7XB%!f}B(Q_mRbG*dr32Eg;lLtKeBwTCsTv zj}Z2VIBWi2a7V|NAO$AVf}Zop(p{1~j@Gd}y-ub-hjEalf32ih1D^v}*6%?WxhT;& z$Z+jVO}D*+Gg|Hx$* zz!jjS{Yb9E3rSzmqQ46)Rmtmk2K`f(HcPrXA7$7{F$Vq?O2h3COmr%wacl+vcOR$W zbd^g43Y7;kM+#SSYJ+Qj(@3O$RV`E6+}fss8%C-%D1+7si+`!jC?1zlT2xfP&^e5n zgJjG(98b;T_2fXHLw5iI7-dyUXCSuTVzH90yhuJkiK_9Z06tAJ)tnHJakT94NtE^iCnmlL$ryjIQo`*7R`0p^b%H1g`{O8)`-4@-z6r?|Kgu)K)bie85 ze)%grY`jh_ye_Cul27qBSuLFVO%}y-V&_TqF&3O~%sY`k)QCR;Xos3lLRlA}?B_t) z-+;0%P(9@kNc1P0!lW!MKu{Bb>?O+&0`A=n+Z$XzB<}!Vnqlo@xB`LYk<6-S|DR*2 zPSkIXA+XGKuitNPrtUb<0aH-Ezx|2cg1H|&%G?SN#9>a#6PemzYdqQApMUu{JV^dGJpQZ|kVT!N{Qg^=r%G4sCc$4IR&`g)exGhCi!|dF<(dC4&a!)9jM~ z@FGB4D~|6TvhVY%9u&wSCD{KX1rj3D`HJx+OFy#yh`QM$J_%UVcR5->FOfYe81@K3 z{tY>3KFZpDFMGgc3;i@=G!8y9`S2a|JeJoM-c?i5+-s~ib7I+m{J^!@T85+M(O11X z?(XwVVP?OQE`kuozSl@$UQaAE2LH(0He%1)O{+{IbswMGIHbVaJB`zC)=B|DQ=@>ki)!0px!H;2)g@i4c750sdEJ1K>vzpIO*~ z^Y~#hkhvN``A1ymgr_))kVOId57PyILipqzBfg-=b||?(p^E~CAh!(LUIIc~Q3QZy zl!5`DQ)Za;V3XY;ktzU$6us2kF+qJyuRo?1jsa`X5~3i>F2o_I-*v^zZ%~c>i}&vK z+7Rp<<^nRxinS48gJi{f36_L{)#{086i-w!bmyeM6G=>?(ayJUI5|*F}{JY zMFwj}z}4W%ykIOzQFjCQsf6y*zpLa!Son9V`G8-?XWu`KFBY#pSm%RSZf-BbSKL|b z@Vnjv&%@tm&Kx!j{`FJ^VPP581e}JAssckt-EYYF>)ca6KbLN!xO;g=kl;TPQ1F-X!gH2n|#dwLPgRurG0?s{DncLkg0C_xM3 z{`2d2|C1CXk;32rBePl&{1otibWpoTX8$Fxb8EWfU$~$LcGL~R_^mf_aB!VJ@cn@U z-xvLTbWQE(mfrm*;QQMK4V*k_(4e-zV)SU)k8YYU!nOgE3F&p-R@FQM1(1rX;i_HvVyIZo(9uXknaT;)$@?BGLdJp;r& zgsdg#Xb>MZVXVIwpA;J5URG9`8`2l_0@~l7bXD(?oZu$vXHA?gsSlhmIcy<=GcLKe zDby_S4sKrPo}^iou_ca>*^@28aEgdJ!|)9K%RS_C3&qlRoq_G`)37>5zdz5h z5EHe9=F#(LG7WWK22;$K;g1a;2f5CO836c&>{GUpy>S1*BQt~ZvPv9il4C?M<%}zf zp>N75vURlK9UL16Ut*lrI0?0B;$njHWUJPeh;{`}<`ikPXu=nk1T|d7!DSoe7sN$cba)O*BlZT9Q)Q0SK z7yJam1TO0Q3^xc(D0+jmulRX7pB_$sQ#K33zlCpBZVNxyN8+pC9|5v}*n8sahW7)O zO(vffTQ?B~UL@jqAc~4y2R#jow(zUMarh*-oTA1Z`K9~bg(+WI4w`wJg(pl+j35Zc zH$jL+06Ji?@HTS72k{}~Jg5j8csRh-kO&El;lIk+@G%WDL3t>-FnlT@S(#PgSFo5O zZ<81uBGO-)G)-oauK*rtvw^>h+)P8mJanp{AmAe>NQ_9NQC~s0FOXQ-?djoT6HHT> zmq4k`F6Ti|GC2{JY;@$%&%atZd1CdYd#Wc+s$w_ATQ1#WiI2Ciqb%|G7jKNy$Iuf8 zq{|wAhU|lH$uU~BRkotsA(NP7CrRbgXyP;}hM|?lSdAEKfW$&)jF&PxG&W*8@G6Fg~m%rbj4>LjZ>E6Z*7iIsJNT$M6*lTnZjSsr($FAkN* zx|3-wEeXBa^O!_OjL#PnV=`vNq_!nkT?|j_OrjX$4S9Eup0cxlc0#pBuJn~OY_cVK zo!)GVS#Oou{_z@5u}RJHtT-VxzYQS6p%fb^ScYGm?KbOEV{DF4y$eb(IBhnUM5o9m zvyC#+$R75ErG9$v?G$ z1pq+<>-_J{($xRCS$@d)+6z+%R5gEl&4hhqm@Z5)$H$vd6HpzpFuivtClB@)OinUb z6oPq?E5nu(+w236QowYRR z2_Y&am!{J*C~73N!bs_0u=Er)nu1Scq!0`657`uzo26@pm^I>X!Y_e?k?&w5jUvkx zjpD=qrBOh3{)YRz=$xM;)sL?azKiYzjkp*GS@=;vWHRNuR8Gyar2-F+=};b#NM#eEg< zv@R9Vlx)(9p^&JhF@{>n(6wWcu93Q!z5lF15CDgRVjz*|s;K$vCw7(MQUw>0p9TpP z-IIos-Hk-X@(+|ab9_Y|{^XybcV&}FrjjZrPpUcz0b2n6MZaC%(ozoH?dB4>@B?C= znR&3H?2h42A>Jfk%9c&kjWhf8pCO`Akn97+V>2jzY(@E4g|MXeXB7QNz{h(Kwr8as zhU*mnmMe(*=?Q$z+3)en509Z$XV0Q*Kg2`6I`FqIzC3W?EAC8pVg^4#i{5?*-|)?s zc>TNYpv7N3^y#M$J^abXk3>2Xt8^w&iF)qQ8CXYj>0yJ1kX7k0kc_OW2?b#(0gj>Y zQkt=bW)gy-O4*EZPN5>GfEpaOd|5NTObG(ToryZ2_=4;{IAqdZ%F>$6T67W>9zTW( z@R4gxJWDxNk||@oNBwz+d={h+xmyK-kl~^ohP5Ujc&3zrbINpVF&8G_!fg znM>(}6M`PSm|9wzx(YIj`K3vLKvHnkkbAi^(T3nvdk6JpcZ>v#Y@Ku|8&F!%lkLyb zqM*?TmjYUi1vY0Sjh3ZGO&&C8GKWU9Cj#71b9ZehLdkr+?Oz*e1U8VPNQ!+{4xF# z#eO{Dw$W5OM4dQiR83m9c5<(45&@EcZXkiMp^cJWFTJMLM+aMR8dkLvwgA_P(I}%) zEI0vP2b*?08jGH<84n1I_z87d<+1zreK<5v{6JR!PNHPlw@7;(H7Y@w@KFMgG7YV& z8DX@||3(NE1R5&Eq4SJT1EcF0;L}VNph);Pz!L099+ntG-AQ9HsFJ&kjvmhA&I!dOj)EnUv&=O^R?;pRk*GPK2gJLQN-Gp(qDsL-o-G_+9)Sz6xzX zneg9Dcs_pb7+~&w#yp1B01!YvTGvp=<5NOn>)}0jz1{A4Td(-I*doBy>UfH3{`Az7 zPoDbX5y0|(ocvSqSJ1yiH5wLacL3<`e`P~&Sl%Z4XeikV6EZy8!ioC8Ys#+%!ga*7 ziyU;(-1DC_L+$D3PCDzlVI@qY4;Vqi3+R2EV>Gfin((Ku@A$({KPU;`gJba>i9c(?kE!7cJe$3deD*wV zg1f?JU(t{sL$5}!4Gmn_)Vz?x3s}Xya4aH1s|)LGzr_Z0P2m8%Laqo zlS*ddJ|#bMTktfz3;vs8)UXF7W5*mswLG`#*;7U-1n_jsW~1g(fQS_KEse3q+g3rf zq7DC@Mak=Q=R?9nVUFx#L|=qrux_HeGZT`M65Y{6i~VMb2I&Na&whfi;WL3N7De zs_HFjCYx5mH$jtLDhZ=_ZD}fP>`BqE?1fKmwj}((OTc2nSy94|!5@WhHVYD^6)w#Y zG8WQ5$Mce2wQJ9C8q%FqCR08q)*nB zgq_2RNB{YHDD?W+J-rZG0sa*!ep1TFtCx)`o5IbI<6EqzfYKtsg zx=PaN>G8q)NvrIQl57axMYl?F4@Ry2YX^5vTKSZcS%1NT76Wh=VKp;P|A+m}f1{$t zAXz$WVlQZ=XUMkvGm`d0UVj)i=PzR&zV!AHs1d^+c^g5Is#(oVvu8Io&-%B#^$pF! z3sW0I%S4?{WMPM2mO|$!h(woIIwOo&@Qx&b=hw0%Y11TVvqNQ6T5@6{MGNhGkfTN3dYvavyP)2Z)=+ z>Q}A@Z&D%&l2z^@CR9Y}+nC4!u_FIHyM?wqwejYsp1x_r(@4mjI(E~ns_8>e6)S3E z$EQn%_;oAtrmt$aCY{o9BU4;SPIJdI^P4x#u3K74QPngc2aL~?vZ_z&9XrulpJ&f6vn_YyuwWf_%~8P`GP&PSrm{pQ0i+6Pj?-ExpP&-$cG$dP;d#ULvza!v)ib z{H2d+IG!M&_mHfOVEg{p$w6pH9E(xjR!kwqQQB=bknhTbfYfb84@Q8_AP~ui6c=4o zX+_s}SIXAq$L^nS_cEy0Ewj_+`BMv1Q=hqJ@ZB#@db^s{LIzacl%_i$;?4a z*_CFL@-@7?_H&09uEhJ#hs>lZ3$^_&WZu>iI@d5|TK#XZsswoO#syam#vz&$X>q_8 zD?Y?;S`FM8gYC9oZkYbu=s67o=I|80gJpxiEr-x_n#JP<2-Q4xIYk)4-2wSnP_+uW zz(d+~5?S#8k{F|;||fu93Xa<78zo8k){=rv^hC+f8v_9{N6 z)ht7EweQb60`=zUw`ZImD`~kb0BjPdvzO4*NQ8*MXD*DL!~x_ii|2A2MHkgve(lg@ zRZE=&1M?q1aR+JO%s~5tFy;D6&R~{=&3S zQ%C|%=yuvo{&p0TPv#Bs$r|u|keYMKV8bhHI((A!h5L?t;*SkBc-B66*0t~~Kt59h z7%{H9p48pe=7%c}aZ%IsR;xa=4Vt@#oZ+ss>Xiqa3_mD{m6trDx56+^x|}N<;`V|4 zt~9TwAaeD}VN@caVJe4f=5@uZ3J;6gcknFy5r7p1E?z=!BI^+}dM(f4GmyjBdUW`^ z_#84F-te*h7e(R>0JHNnHsRm}^o$Z{`rL(Um-B!Jj9(J!TJSq>9ma=t%#{RGB50sv zM=lQ%hI3048b>Ki=<4bI$_j-QL^2`AV7D7^qtO;x2(LK^Ql#|gaf6Mz!Di%^*bEnw z2`Q-4A%LU=lAKgXbse>w(3mk@9>|&DS$`3Q&VC=TWFuD;@HT-)9OW`N@-N*0=JSR>kp(#)y!!dMK&GLEBnblTU zkK>#0&d+C}*smOBdM@fWruVj!^mzje5tVd#0IUwzLXdh_@!_zg+9d^m1tGtckb`V% z@*#$T(I0BnguVh|Ttmhi0WYAFk{La$w}NQ7un=-B4K_19Hu$7Mku!wnLj9?K7=QsK zHuz&FpRa?u7z*m%pD52SBDaRw8Lc$fUL_2sx7$Jbk{>QjgmEkfVR@Qc3N_4BEa6=+ zi)ga%N)Av|V3KtVbRITe6}bt85Kmjp_!fhe>W9B#EgJljWM}a={V2dF*le)hW;dYq zvV{dW(P@lTM(eC5fd7Xo(A4)KK=_um)NdhB?RGPFQns6PO^n*fS z?ryV*bmYoci`e}hW$3R_o7hku@YXd3hXcq6Lj4GEz3Xs^p#?`<522}?f06+QC z?>Ga0^XVs0N5=ly=LC-uH~jJodJ=Gd1oXFGf58p-!ck!f4pxW{`kJokqDF6f}4b`6J~6Kim@hIATJ8Op3k_wAaMf`WyrIMJZu8O7l#u)$7d6sQ>#gHR8M@ZhFr%yDpRYqSP- zDAGUGdspjGh(P-8===B#&?_D$6{!~(OB7gChx?G?u;5WY3 zs>OY`TL@bK-Do|c(fp%Nq-}1)hk>hUR90HDWS*oqSaeiBpdMU9#5&%BFCWBjG*Az2 z1uPOH%SzJvNifq=pvbt2`+{sYgt3umFd|^-l^3>8VcbBED*uP|12|AX?ZZ^>u3=%v zl7UedREO}rZ%_Z?*E?k92rQM}Iz7JMRYM0fq-BiEO1C_6^H?+=Rct`Udo1*7Mkn%Y z)xbxGjGGDj=Nlis6@RofOM zk0IcUi3bSNzKibq8eiH9Q|rFV4Zn@W*1)&d z47N*4i0EL~*$k+Zh-EM_DuhOEoeB!dDlLM(CmB*n$q@sdh&uzpassrkK!C_uA=O}G z(r^3%|9s0!_?4HJuuzZ?J!-H?Rcoh@UfMeewk-`DJRrF6p|kkXeM46L3w)STE;)&vo)O2CLBxJU)jZX!#VVX)JorL@@q#!(Ch zg|HEbsI^2y-H#ps0md!0n|SV`55V1Z+zFcDOm>~N;~{34RtE_-&s`oQ&VuJ;gQ$R< z*Ql6`NJrQQi$oXN6o(JIHXMWnrA3@Y;XxK7zE5e*YP( z5fy9g^qKv%RgWGu+qi)&buoo^XifCVV6xh3l(`RS9%Ppse1vXdVoXx-L3)U6ih-V? zouGv`Mx=Lm6|B6z5(%c*x#7G6V(WtJ4l5uKQfpawK2}L479Yj$Ypfw`vhq}I=tXiK zeP_|aDzZ{*L8lFNE_AS+hsq0fQG}TNDkFHQHS|DwB!FcZZAP4qK7dmib?{#3&8;wL zZ{w~d`WDuSowBX74~ha&Iw9P~hgL!t#u;p(mC8fcLTAvU=n50Kt z6n;*I_JdCx0$oMSIg?t$US$ISe}mOw(1@2dz>+Hi|0;BFhm>8qV%3mtGbUm>_Vh;( z_KUu+!VFM}A)N3-`VsyGd|5C8oj(ZIuFj%fwzS zOC2wfRG@Q$fy&Yg=SWNBj6He+98vTYZ#m(=zaw$j5kCuLNP+GkfU;!m0HHKA)bJ$d z74t9p+T>-)qTB*mj*%nbn!Nft|3#m_u0HR|H-1em>%bXo^L*T-Zal^K@!7K!%@nrzM?Mdm27<(rK0U3Q|mk9gf>n`&F-~UQ@lL zIt)OIAE-|Jck&PxP{`AnH56q564?^X{^AMg0DdTh zS6Ri|BB@MpmQcMQzk&iO12yz?ROAHgz5}_zxkW5YK0G=^2tp7KagNw%i1G2lKM(W< z!9P#r_}{Z3#)R>~f^mSnv+HTXCu zpjKYbF)KI`wQ?qwW`-h>Uqy3J5u-0FS z=GOqCJUCqr4IYfxhKA4sH8lvqaZ~vC-|{_^q}8mGUWeF~RGITQy!3Na3db)rHw14Iu9r^~sdo^qtB?`xB#>i$s z|4(y9&W$ATBM9u5DUVusU+HWTf;eUd`pP6wWQ*uY_+RMvUAuOH_zTOBR}~qOty-YE z3*b3NjQfMPLcR27<^?qDHuNRMah0pN&+aSJKbi=%p-HsLJC0Gzyn|b zU{=DA(Mv2|C1;HQ^zp344_p#vG}`Ckd5g{F2?TmkzRoX%P5S0v^pVdi=PrIjFc^f; za`M@Vcws?RvTd`VqwKP}Vm!wR5?zFJ4{aCpdSSa>d{&ZY06;rGBb9>X2Hj}LEgq4F zH}$G82zy~}Z{lIxlPMGK^?r;uRx84o(F1sVf~P1R`N(JIG52ai?!~m+hK5}3SZ>4B zCa2SMF^zoE0gX2Fq>>5Qpw*PpP;rmW$NMU(7+R8^rX}5`Oh7STMiS%(0=k{JUeqC0 zly#6Wua(wI@*`#>f5w@(xwW3!O!U17C5V>H2d-R2G04QO0@dv@>ZnjEl+~%{D2l7! z-jDq02Zh{Wx4OIP7!DXXPpDp19asJIKMXvX*4?m|I>lz^ScbheieOMg1raJlP7wc2 zL||NTUJ(@ku|-ov>UbjeDHU7%;j9Ue+JUH4ib%3<5L3|C2*VJG15a1FMbrT~t@x3` z(mpl6z9EmNVhk5k%}%G8J8p=%_+AswKf8>`m2(<-jjZ`Q5&u;)S+-e(B2NpHb5E&Z z4ld>SVPsGBDOp;tkrt5sCb)HGwE!vIuSpshtaGAHFDzR`8VG$`6L?)MBWQMZ=vk)iM;x- zQuPtFLLX^`-Fp62Sv_7bj1zP^VVq7qUeeAH)LOQc=alP=HUv}w*P_Kq9Y&COrFj` z?;LZ*EgV(s1yL4J1W1=oC*CK8&&naYT#2ZLtWRQl6X`-?G~utJ*q}HJ(f=Sr{(>X$ z9uy+%p{gq*u&T|e&CetASv^vmOp`Rlt$3baa=wo6)_ zxLq*J(@NVUC<3-k)XihEbt0ZE>155vACr(sbc>|DmT=Owp*Tv~!14yIby9Nz*RB@y zs}`Yp4}`O+rL;}3owh@xwGP|UIVxcjGF!=6ob^A9N^~?LM6dj9NqLzA!pQ?qoDvRjG(&4Rvw!I|_aLNS zd+27!JMGk-Ve;J$0VhvDsnL)B2LeXf+Ctd4OKB1l)BqMwr%;V77&^a$MHFRqq9MDc zLwDniv_c-x)l8>swkUQLDMUylao9#93Kf<-L`_FzVwVJ`la-Uooaq$b(J{+w=s9Ni zEU%YEz^J%clzXSBcZ*ueDyyfTPS%Tah~mXi09-50mgK~5WC?bgVc~ECNtD*9_pc#$ zuF(k_1^th*v_UPehH7Ez)wt=$A8|X4_%ArE%=TmXFVt#?bve%>h8sjf1wG-Ani~Y7 zOn69**uk7E@Q0xIX`=vWcFM@Z!lgcsI{_nahym%Y2uWs@MqVXp0TCedP+1v;B=ur| zrQ7szJF&Ab5DW1FIconn^fg(MP)|v6v8-Mt=|++JG|d{FR&It*ou*k9$>~uc5Cn{x zKg&_#G#>EgzUL_sHPNi77Ncyd zHro!R%tFRK)snbTC)b`OOGzr(SIe-%Jsf9>z5jZx=Ac^c9NEI+WXEYVG&+o+kL+qg&T@}lqW@4J)`I80{tA}n2Q z(KiiYkw!QUq}$Z?)NGF34ET`}-?o1kEdoBkrM38x)vFcYkcR;{M5nwhiI#$Z)Bh1T zgvbu^!ipv&qDINUBT;D4#edlFW_^i@aqSNT3IN&RTP)#E;6Di*$?@n{K>Z-NAMphD z!w6L77+LCG2)$5;eiHS-BlGz; z!4PHNgpI?$LlnnJL9G{kE^~`{Q|CGJrF1p)}HmK z1x-ZlXa&Si?gZc9)3E*d9>8*)MVBBu*hIOhOsWKOpqi+O)Ld!>wSn42JxIZ_&PXg@ zGW9{RKq)=S>#tE7*?Hk0*{fke)!?!e7CQpFOVM; zZ8Z4`$W18S(bePbNXT7x_@G^`^M3Lac#}w%Nu!FVqT89SJK<8o`d!_OxBw&PNQz$c z4WQhM>55CQ{MH>mlsKR|@4J#FJv!FexN?UnLZC1qB1H=#FJcPQF8mV^uNzT1Q>zFa zu6(z!t2J`*zayld>OQ7d(4#Y~AKu<_2NGnY$PfpCfbj+C;n-CwD>~)RFqSO1O zmHI4FoQ#HRG)XzW%8G0{KZ$!qBPZo}1GRy?q(3g^mfK@e3Q`8dQ!C<=N)yYn3w^;* zURED}YLWMIcT%s!K52PKmDML9HYGPLJ~MllzbwI-k`eGEWuSq9L}x)pZh1^rJigfy z=TD9+NXzW))UrS4mOC6utLSgelt6MtR$;9ejJL+-mitpn5@j9olqB`a&Zx9|;}he& z@YvkqYVqy(o|?3LUub8Mw=CCRW{>l-ss7ZwUcHm^y`k+XxiRq>nRzkE@i}QJnR)TC z-dvAX=~bCG&Y!d+x4gHn6eMyIekeP$pdcwOwa`l!*kTKPuFRZ*0$3zZFv1(573+#~u*<1951fjP zbqCv#8~WgM*^`(rp@Xq;@pgBxGA_;;=ZZ~mGV#!P_~LPfCOAnO$ZMkyJ7Z#8?j-ZY z0PVDrUdO~Sc2}GePRGUv&nqXLu2>hNbGc&TfRu^B&s`4a1oWoK1GI2aF?QyiXtzQK z;*iVZit{*vxp8suqOl$)^BHt0(S=g1!P}sz1ScHPmr3tQA20H8HXvm@gdZYCO~rc1 z3s`(N0A1r{RN;oG|kuk&}M4)uTTiTF}L;O@- zvXy5Qr*!llKy6R9m@o3TnXEBsg1Ca7DY#8B$@JCDhD1Y*kK$-H2KBQ$8Vd{#8H$Sg zN|p{<|M#*I{FOP`?DXlEw!~*6>=r`aMRJNuV^d$xy<#vXJJcq1^D@yJqv7?U(-z1= zN!EhcKqI75G9{_Seg3|_Zk4}7NRF}Q*jtvwC8%V8ZagYl`K?f_G+i}DwOMsH*a@$| z4#4NCb5MrVfIKKSVgh&8*#k2cBr8LbqayM({PJNP5k^IiI#)?5aAXZ=fwExcL@*e^ zTu`V3^0^>DHj*Pi>Jj%^w_6}znA}6GOxS)Q6eSKr<(5c{J{bXGe4+pHoortbBP@Eo zx5xLM_xHTM=kGnglf>xm?F;QSGA4B&iOtqPsqZpY3!yN@3?srG0pLh*Ql)5QV_@@@iK9(owaDbK zJR&~)QUQv^7jv-T1Kfv>IE{tom>6@R(RuNDr!hcwhyq3@d!XyM!_jpO+Tk=85chF` z*~u8X&UYll8VZdrm$A?gOE2m=f9dD0cK+3Mnu(iO-7$(fM)HuyQCZa!1BiX?6jGbb zg2U!5-9Ku>KN6F@KC}?<%n(!oEfs_k*a_hQTb;>-C`16fr#w)aDgmmpP*fEZJ7&<` za4n^#WC1QNhAk41ZlK`0Y0_A3yBuZj*dAwj?gS$Ap%QVdFv4QL`pPR(I^ySFl*ZT-V}Ui zXxgxhEd1?>7w~sEsVkf2rnRPJJr-;mOF$nE|{u)&zIqf!R-Y<4{F+gXTo$8FgYmlWn|YIZOXr&WF02X zqzsG!_j#67euhkQf?$|mpuFA^YW6+<#&eek&l$HOKYfw5_-oLPm~4bAk1mhi{OW&> zN&#*?4>oi5yG~>i$zN|_AiXVhLe~97IVs3spU|v1snOj*p9N%mhUP|4s{K>*p5{x< zFTB60xBfnn8|Dk4L*WT27Y z8UuddkeY7LxcR@W;jl%7QY}NP4|i&!u#<$S2#_xcihp4}GdvGFuY#LNdU$N!aIgL0 z=Z7Eb?l^xc29Tqp7@diG;}*SW#z0~hz`x2u0<2&l0T7WW01^Wy+5$+&l7e**Mjm@p zZx&mRy9lxYQp@EP)_)ZO765rQiTO^lSS@`)GR69HKDp19j%yI z94~0?$ozU!#q{FBg4U*5YER|#bhx0ox#rh#RZ~mid2MZ#gn8h_spoH=MwFH_CP?T{ zb>C;;VGw=uZ#~I?!>jTMKpYSCeU)EC5U~w+jU*+5)mUV; z>MWOiw?TRNh`w+nC+R;&GxsYWU1D!g`t=sp0Q$myy-|-r z&Wrq(+( zk7`~5$o^K{tpN@pP74&bV7JIJpm-YkM@3^PYXp>4fiwkWzyqMOqf#^z3PfW8YXr_{ zf49vC0Kl2wZElglO&362-Sl#C#}IBj`1|+msbL#0Zu9&5aS^=Xn`N^x?LU2DU)joz)&BH?R;w%f>4l*`_Gi*Ht$!Y8)*ior{iV9P|NP@ysPm3*xk=FP zA|ZqaU94SKU0wO}i8)Q^mDbsKvqld#^_}OkWGpUqCsbIskTrYM-4D%E&pWJeu@vv0 zuw}sh+24gNW*K$wcnR;oZupR#Rj0SfzQLW@C0>PZgZ&0>+pB)6zgccfL*3ZFh|>D? zlKAf4&JD8`zmO=LJlUF%C%@O+{Eoxnww0Ea_Wz4`t!d8O#+gK-X-;oT3U6~01hNHd!I|hf9j^V`SjJ-Pn}eb z58eQE-$a1Lr(o;*0=)j0LoD((&?w!gxnJ`P=#xIt{0wfOGV-7h%0q<+C$0wgQjH1# zJUMO=gOMcwdl^6?kT3iRlr3Pz#Z&=ShdeKbC|lMV!uC}F%5(tI;GF&haE2dQ&_So+ zHu!|!HY;e&eL$NTz49#iJx6V?Og*Z zx0wTb?VV5@Tt}mI$G7|eEE(sszwjN^_&?=kofdPFa~o$7iRL+Tnzd2hW8Q1A#MaAJ zr)x1U`0E3&gPG&WR~A1og6tXPa!eN_NoaL7#U{Hdy^CE=tGqsDvFtT_c)%l3dKY8W zuf?p*35=l&Qp8-GLkYkTH zeKidU!7GFkGwN#RnIS%{xlMq|r8NO5U-D0hjyqV)7|uR-&0G_!{pP02Ke0k$AiKdZ z^V~Hnc)IxDxcr_-XksE%BFSlIe1zX(>8-Ds6AzhvLZYEKX^qNem%-thQW&fXx}&bi zRmD>vBM|4|Q=t0(9Q=nN#V=}&INU`){a_J&p>D#C#(#-!MAoRS*q^Wb9ZK8(s%#&) znI11WWFf4~R6aYYQI^;6E}qqyE76R1zDH*s(X;M6FJ!l0{U)SM6gzA&A&Z_t(HGGC z&y;7SgzQ%CfBkXgg|sbSiX!&r#~?Y;%-HByLqm2{%w4inY#E!ZF^tP5NwNqLfpS@E zHQFpAo6!1MeU}qf`KE7a@pgRUCHyH*JQUCx;ymZ~y=0I5$oN;(|~>0Gh#A@rwu{c(Z6!yGA>D`t-jpF0ZLs zUr!CVvmVOktU7M{v1v^?xu^2nh97Jw(|F~i6JIwi&3}_d6!j%oq|TaVwmZbHm3_*B z7m<77-g}|C{Jf{o1~lXO{quibA9K57_)q6n6DmpWs}g}g0{@|zkYmZ^vPzyClaQ(_ z8SoVmi`#r(U+4|v$XBw+l{V;Dm*m?6t z$EeTB&ujYW*Kg6zJx9(}9$}#ZIf5G9?{FeJ(t8zKu6qc|OB8;(yiPyyf;W}D$`c@^x;)XShZw-agFj_2 zj2LGV>w~f26X;I$L?Tc*)Z$uv09Vn2NqFC)q5#}Gc;?!{o8BrqtFC_|dfX9c9C&=< zeN#Cnk9H-d6b$S*seB&a1=TqwpPZ|E5sJr;*IWYe44716w|xv8@Qr1@K_UrW3mPDK zDjTBGQ)r=PyHlwvma^3 z(mhKbQchm6Rrx_Va!=R!%D$a*E~OZKLqXl#c6a3>bPmyiqOEI6!y6v0Dh_p9c)i&n zmlsmSaU&n#7@?=OD6nbb{PpO`&pWTkGX1mXcFx{M(X64&TVGH(-N_hfy-9FY#=?_y z2IG>Gn#&d{uk`lJUu=_)FRb3%0+zmafAJj`Es%Mm+&XvDx)RD+aYXrhkJs#!c%z9oJ%X$k{95b{&)7eE=4zAP?RVH~ zFKk_P@|QaTNPEkM=SAKST-J8*^~$le)AGjGZ9yfs$!2k^@)L26;dx{ok(Zfy?A}oj zf54F?ysfCXdVa;&OGg@o5}q-B^6HIZL^wg)Z6Rl>$Dp^y3f?bEhC-1V>%a>se=y5) zoH@RU{M2PTE9xmI49i8oepr;w4^&N+1x_#icuVL0xxP}_btl^PEC*0oRF($Y|GIy+ z(La@3{JC;1$T8yUH?AE~?VlA0b(%NMiCS#|BbO~bcIm*m;f_RDr-N;X3wUOAYz|AE z*DVV1k~8)o>PHEwAaUugpkVJ=-^TO%P~Ks`x3r=7_M6;BTLWz~_yU|HfuA!$#k+&x z*!qC4)ZvXC`aAF5lkF{u78bdZOP`L`m+&qlZ|*GoSZn6oQ{ui*R9j{&3`OWP74Spw@YNqM?xnIFdvXs6cZSr;0*pcH6?YpQl47R6{a+IeduZ~ zGsABO$1q6UQE+ND;1zIfMRiZemW6MsJzv=dWxf8#`V6Ue;9o+GM-ziS5jMpUyB^Y30 z0XpCzSP=_ZX5nEfM3kl>SOJ>{!7Fvx8sgW;E%B)p!2?yds2pdI+t6~*cG}MYfQnQR zS%t#%aJLI}0~~CB%C`cj!EZcohToe5VD*e`*zxf8=Hci6mwSh|AAUT%Tqc2$P{o(b zZ+vcsw;6u#@OSym@vwdh~E4)<_qDIOlIE}U<(FIKj6(S-&+;3(Pb zG}#jPB2NxJL=ap=NV4+OQI?e{hrk+WH@GTG4#dc2hY&Cs-EQC~P4@olNjuNk7$IhM z86^&4M+Iey`F{CtVkDyK!bKY zT?!W%7OX$H3y6X9^>A~5g3KBg!p!y`W~ovJGJUYcI$o=7(Lo|uCJ#3fsUQW(z(pui zq=5hbU{92RWypG{)QFgv+KLQ-8Agj+k(Yq_D3yDlo~J=w#z1GR+DssWBn|P%@WgLP zt`4k>Ap+z~d|Q={h5DFoY{7w+P9PKF?jkUNQbOH0Qo1Ei2)%-}hm$C4g!2d469 zsJat^3NTFnKL|aQN09*-6&0oQYymtCfyEdOIIRp5@q|mldMn8_aGNuCR8ZHXeVjVIn1x~mF$!skvLg9Cg6T8(eQ$0UU1=ej43 z;G1y6U$+XmZUw?=HmeQMe*_C<%8IhHRO^-W7B-pI>209+kpc|uw^avNhe3OX$0nJR zWd^g~y5}IV8r!1|m>3(BdR{2j{XD%9gZ29}CF=w8|lY zS~RL>t#-m>=iKI`!whL4C|zDi4jw-sS_PTnqJqU9BwQAgN$o~-K{pgBOGQt@6*-i< z0)0phwSTG5F4#>nMOIl@DZpBtq*fFdw*`E>ttLsd6C`*Fc}c<{4xDd>WpIJ{*lc!s zNF61(NzS6Pi&4pBGs~n-*`j086k84PaVE&BPlZV{TgMS$hOn|EDhIb(qrQX$z&)1$ z>tgB&gs9PbEUQp@9rB&`zr@hxAHj zeJM25>X|ZH9Xt9!{YhsAUj@3SLg0{4J$RSeyHUScOF(xl6(^Y@iX#k=fRA)RVYm{O za1&Z6Jq|I*kS?1#ZUyS+qIf2$!xYBM5JVxUSU6o&3k3*_agr+HDRL@7Qn}6&42O)M zw(~$BYB5c?DI|cqa{)w+a=H`M;4S2)O{J{b6-5&HTE;*}q5dH<TKI*v=mR%^ zO#t6L2BR)cmHFKe(l3DnjO*H7IBkH-iVYJF_|gBlN-@kF`)fec%x2x7JVMxj|CYUO=g-V54Mt@f{OJ>|g6Bt26SNRNAO_#+I@05$s z%c$@9=TYA)15=RI947P4B|6>_(+0u0qDA2Cws1)DxHvgvW%UrGCz( zr)y+gUY;#omT+Z*0)VC?j0{Gz0D#(>ckU#oJ@1O~Qz`T7%7N#eL%si)=@wj$U=i#u zI130)i2S_1s_Z5!(03C(6=~AX{!0%H(R%a}W@rw#YrB|C&$GzK?LL-#iVp z1-}d3fI8Z_r_ngWEj;oUrBZ#FN(eF3!^@EH?-``;FZdMg{rDqg*_Vp)6skb4E9L6D zJ&R~~@^jz@IRGR1x&IpC6SNl|yyuHPB?>Vd$CP|^1Qh!feYYAy^7|1$5e609@hb;O zAoyRw7%JdT{RH#GGw`Q={_^Z+XzwS`|7V{m%i;Bxe+I7)Er++s-G#3WZwo_Mchbe) z3LzP8Tr(Nex4|&DCSsfbq`*@!sCCnbIsbi(#$X#88X9}+z`e?uPl0;+y|$v*#AnZe zoMBIPRJRs&pwg=cK5TCKKw0ql+3(L^g?7C2Dcb+>F*JGL@yk$bR7p!k<54u_sSW?^ zDJ*G$a<&&gMZCS}>op%MXMSB%`|#^upi4gf2qGMoK`HKs*6h zJjH~8-3N)zT*_y^D2CQ#1R0H8l+{$zwBpC-D<@~`Q0a%~t~&5-cSb1E(N;6Vx>mn7 zCQy+bxq6u$QiU+4kgeWv*T?0x(Uo7W{Q=!Npxk03+=VOlN;I+wW-Dyri^j7MiE~16 zE1w?UUfHB$9FaL?u8_H8X{Kdq^k0{a*-}_P_Acd9<%YYSL*w4hcj+ugy8Ri{l0LOni#t8S|V6{PzAuDgwu2T&&n;qu#nr^S*)G}R>?zS{ck3ff}{C|77v z+&B6)w^*=o@`1mbAh)5#LsvK|N?Uvd*IF!=-SxMkR7c~P>ylRI<9$BCv>^5c;gcY% z{CflM(mZYT5(gBk2(F2n{$mXA4*EaDCW}*4Y)TTHvl#U#Z8zMgw4t7b)LER>;ct!KGFZ%}&(LLPQB-T#05#o~K#|k` zu{l_*f8gokPeaLEL86ojy#bPEzTZOF%%TD1qyL!gkbeGSkf^bvSycJ>2h?`>Ipwy$ z7b$n6O05L)ti{MZ`0-YfGMBvl-3}}Fayb;8{_gB*17&0!5T{kCC4tP0Vq`&Ff@pSj) z3;u96dh&s<7btxne5WkzeHcuV=b=bWqt<&&2ka+_GRrCuAemp0ck2T9G+t z0jqB6qB&K{Wm9L~Q;}*puei$;tg5gW>-*}jRF17$vUbCXtFA=)MFpt#mCsNk8C+W5 z+vik{U%&nBEzzLWUAOYPbKADI*ZCjWdG^L*yAIy+ALLbDcpf%#q-@m1m`$h^$Ux40 z5;S#DprcqPi?wko6$s(uC=3?{nj~T|l}6^&ctd0Y249A^jTqMP!TGf-PtOr;d@9$M%2no`QiciRQ>+Mi+#0B8$8UJHs(L@cW(ElovG^-MmAu z(^(*$1qp7GS{flP8_>H+u+W%61Oiz{410Obp4p9hw>TVS&XgVTq?pv1NQ%;N6302f z2c9zN1V%?@ro)R{-Dy{pW|#c9d-CNS&fuD_>L|hlO)}HWK~GZFgF9w5A})#HtpbQfsdW}@eB8D2l#{RAdW@a;){T38OYw8}v3#hUilEdNUrs~*N90slD?mXL`fh@AomaU?-C z{PCi?;zVEByxTXEj5wwQ{QrzjGtgp?1!=1(8r*0!Ui>iO*3Yu*^Ie2Hts@0+qcIt1 zTYd@ao*3YT@vWZFyWmniU-9c6WtM3g!4H0|=Ah;w&AY&&s(B?LNPz*sRdEd@Bo)GP z+o~5hE^NL5SqrBd7X`qhAqfb>F2n_v$`Ch_F4JSV2?QsLC}QPGGN1wr(*$Lqcz%qeHC;x_HiN-Y6smGCT>hE8c^s`Xd)<0;9f4wFoAjL1pws&{ z_??ir$st%EE(Am%TA$|!(9;WC#a`u_w23psge7mi5k1~5>IkZ}Y|0;RoDOUZXex}6 z_{cM_I$StaTf^x&rsdw=4491d9ETmJ0f@5awJkmV_NcwPXSOiG^X)U&B=iBJk!n^Z zJ#xXljpMraOk4igmcDr?*vT3$KwZ~f@K|KlH={}s(SI4z^r1>ryjOWnIaX1=V%qxN zssmGYZ5TEF(*3{Oi9B}K0|bj6(cWU8-dP0Y1`hPw4%rP9W#xJ8t;E4ev!6yQ&?_@; z__o-@nw0NfzOw&m<(IbnqUpDJNY9$;I*9isULOZY#W7-@a=*eqsvH@ZZa3P_xPJ+n zL>jBS>6Y;_XEym7_q2cV_N~{uWC2P|6!*S?&c60UOsfOiHbN^u*y*kEXjzM1)>${s z-tg|43{CVh0%-GR){Qxf11QesikWOQ^Sy@?5ljrLOKj3rb3sej(nVKsp&q37OD z3i4qq+NF$R4>Bt>kYi2*u1lwAyb5l3E5c}jg_Kjm3HF1+G#fxm#Mpu1RK}4`kTnA% z2PWire+8g|Vumz>6QAVlzwE*LjwwGq7EjHOM)KQ7+XM-b{^rnI%BM``^7gjX4>+O; zyURG~(5MmjPxO?}Frdz!@ZHPHM_tkT?Y~7<%N9*paAi1KeRJAVf?hvEnWp@ZGMKD( zf9&Zt%jCPK78Xn}8c^-)ndRlnZeI|KM8|gZ+`q7GM}5_Zt>^%X{o`d*l*6N}-{mmCtMY49P0S;0Xsxos}%$HG$zVm9y>uoZ)+A?ufn&2Z{= zthy({tU=b;98N|t`l~al(gA!@97JM_*~9(5@U)+5l-2EqG;ZfTyQ6L7*#}(3k{0+r z9ZdwiOI|+M)22LeZ`}<fRyZ#lP-ym|f>HVV zs>#lHe$w@yG2@PZ5b=47Bkn-)b41kRD~b9Xh0}{;o;`W%=QyhUS zjP?u$lp&X#*JGHd^lz_Tw3Ns$W%UHT;E})W`R7SZK9p0QYq52uR=xY@z%S1PS|(A9 zG~%M0+qX7c``$8+GA^FMSrGN0UOw0aw)bA|J_r0QH-T8$cXWWC*`EGpZ zjwu5L_)OgM2J6bt_rf(h#0Y9m%DTf$Sd-C=()59#wMX+uaO!&zTv@T_w-n5siv9lSI?oT*0x;0=K+s?rLBIG*gBY@iZZ6)W&LKmiJD zy>J98q*7Mg6tgTj+qa4rr_NioF}RG82tN;QQ}-YOR$7&d0l0w;9=?o zO%50^gEq(!*;3(9DCbf%ID}GQs4<}Mz)C^4@GFA{1mnagiS5DaFd-Td4-&5AKH-WA z!uSa=Ef{g(2;;R^8^IY2)VA;i>}u{N>{6Ju;hJ1Iz7aas)vjoGO8KOtvdx~X+S4-b znVm)BpV(WREX-cE`-(1~!ywRs*|Tq+x%tS0EE!o^IkD+Pb&u5%ZI3smMqQ$3jhDQ? zKQ;+HWVE(&WN1^}EKUcyWt{@N9^W+@Y!x#hRhU=W&|00%Yv#33 zgO?sxb^IX%r?;BZ9X4mml6h9)R!Kh!s#)amU_>=dD09ZQs>QBa8KQZon=s#-Z`=; zTL0dK&9=6@>cv&S?ZSqc%o?ZbIsYxweX=scSy$M-i($zdXIIXsG*Gm~{3LAU-grS# z6v7nc1;BZ6GyHO2epSzkkn%V~QOVU>v~-O>IwEL6^9@VREHUUglz!eb{yJJGSc@GE zd84i=57Tj4zkI`z0ewnjsfLYZ7D{wX{cGbDsnn(>EzMY~R@~Et4s>6$dHy65qup@T znBE61`sjh0hCkJmVO8g%Lks&W40JOjF`~6rAv`af7w2i4-e(F)yhTTHBi062WA!ZQ z@`GCjmv5)yZ(P*{!zHQwGt8WgD>)aGQ~b$%oddkncW53%t5ne`r)mPI1XPG&-i8M` zE~+v$0KWjCgZo)b6+vfYu$){1{u(*I zcXgw)*?>gkHpH9)OCF>c&@CuwatH)65e_s0EK@NSNJviGg>ehcpH5@lso&5F zjsfXZ`l4bme#zXuus;LJ!BC*d&;h0&7{T#gj3+=n#>lLm8T^?l&5b|zGC9%&4_99o zBRw9~sUPaG&W^_I=Zx+p5qljlcP3Z+NZ0z&lO|GTyP3^G)sKU^ ziL7C92Be8DcO96J4RacdE#-pOOw)Yn7%`CGk!$+|ciJtQ1LlymaaQ4`#>sgO&M$c$ zKA(0be_dxbF>I`cDf04=9uT;~@~Q?oT@stRyBcgw#eRllOT#(~&9ZLJ8_>QR62lfj zs&fa!H4_@5x{$yL@QPMzf#fc-at2)Zx0*M}Q9dwt{*3mzMuDYWe6j#kP-d4YxPWKD zEY~*2Cz$_SiWw!>*7$MS5j3dK^8<(v)r27D~I=<;I5Xg29#1x34? zp>XkL7veIesDIK+*Y5q1(mLa7nQL(mD8#f{D-Ho4beBrj$E3O}N_S{9+YXvl= zx_X3#q%20KVS0UO71Rxv!nEV8vQa`u{eovneh*XSa#f6-)bm2bkr%4c2IX(Y$ZtV&k7!a9)i!Awq&%Q{*nmU&nUJt=%nCfgCMpDOwZ{*kio zSRFPb6-y{_i{)f1HS z|D?P<=beid?!EZT?u%>gUk++W+eshjFE(n%0T-Xs!p6Y44c1=;A^-rqQ_w>!PmbA% z#fE-kC^$_BF%y_uQ_wzPN!yBJod;=YS|{vXS-EwxjTee*NI7#ubB|ROT=iNSe z4$tWH?xfv*>Gl_H;LQGVN63B0d5w$uCP4mx#y@Y|b#&3f6@NZsAbVuz`FB0NZS%wE z(ADW0#1s`=P`2SONVC~!qUK&43H))#g!?BLLDkNI+KSje^Q@`AaMU6V6yzY?&Xye`YY}<149UFlse)Z&Js5W%AMgULtB2A5^3zDbJ2XFR` zP=@iKnoIjbK*ksb*g*tjg0L&$KoOXop^X3pKHMTVchU4|>n%)$svTDW9wrB4uQ=>S z8YfvCn!3#3wt`n+c-|@6Hh2d{9*9~|S18w39_DBV$&_;YWvUb;OsuR2AvA~Js+JvN zIf$#+Y?e}aT0lftW*pk?C#~9xO%yA{37cdKyC7L~9-1#CP}jZ|XQ;#BN;n+mwn+Yh z<(xiQCjP|5Oib#))&*Y7+YWIZdi77Qr+lfU*=?NyL zetag{G(Dl*XDw{4$&{_OS9j*e_cRu?tV4IXY<2mbvJn7SiW2bjTxt~xE*y2DCSbOg zmF~Dwd8AxBO=~s8s6RWcf}4v4+v3Zi#M5o-D{cTc9VxS}jv&IQ6Q$51vM3750j z>Mhd}?HiOc_!voA1pA0k>DXq7ZXC6Ac0obg!JgFU+nSSIskWZLcp)N}bnkGR>ysWI zn%*$O?}}9~^7kK+S#xN{h=3TrIUD!S%@kC(6W0=>0{Mw@pD*Lgwp>_QP`;(~46eXY z+MVw&nogkU@*l>^mKkTtki3!-El_0U^EV%Ps>$im%|q4vj7+9#QvdP-7wfgVl#`zZ ziq^%Q`LfYrzZ%(Y-U5MFdzH_0w?HkPtfon=thfLYZ=bK06ue1u4@OXs6fyLqV@VR$ zP36J{1(2ad$6_~D#bI0pV6>iw!d$$>DX_zF5knBAiYxi@lI;W274ZD422fdCB^nQlvz!9x>_r}VWK>J~H1kttaN2aCB1CQ?#?jtBk+s?AFG46DxOAI32#VowYM2wB5gX z)A;Pfyzb)8?dNnf&79Dj0-?@rx0nj&E_r6H;gm@a$$+@zjK?P2k=0O;!Ev#UWtM=2u@uPd86dIb zY)?6+1LTNP)G8KBLb;k+8uKuiyozuM?<%S#7{DG=v{-O35(u&(sXbyg zx<;NgvCHmZNV=fLg^b~k&ZEZtsV^f{UtRrc2M7eSsN=@(wAzXF*=yBCNf8p(;COqED6m~tg z`;AC=`&rX&kA^xL6J9eon|7_dbKDFE@8^Szj&1j&?n^GZzpSgAyWq^FH?%n{Iw)AB zxnKE@?o9d}*#Ffi=G?9lg8d3p1jx>;f)q^X=_90L2Fkk4^Alw_P`;MBf?>B zb{SpXl>O7Y#+|&}HsY~c8$*+ofqC-83WK=-N)WQ+-h5JY)VfodIrFzGNALLV(eY3S zXw9Zbq!?P2i1wKDlbTe(b%|=9PtP+K$_9t#Bp`fy1l!1*^mH`>t-^09m%( z*#K?R0YU7=t`#XJD2!EJRI9u8ugCTFzHp_nZv3MibT!2 z@q3;bQ`Z&kJ@$Eia@mMXam(_{rS&L`GOcv|~&qLMXYF{)i;)F<)i<NX7u#FgJ_BJ&jWksU(((-!4~2x>JT-eF;n^Gi#L54F~jhTG_hUz z&qF|gOv;lw2jc`!qfub_ng{G2{09l3Vrn=k2Joy3-Pm0LyYYl=ID!ZQ;B$+w!2zdu zUgX^319cv>LxGkQ3~`|B8-$V&0`$QRG3AB|OzMR!$V55^I2wJw^o<2Gm8c{e>+dIn8KIQI`{PzZ4x?$Tl zKf3qS4%L#*zI=TYqLw%W3$hZa;;t}_|JH@|$&RikGv z9d+CHUoFr+X)4QWnX$50@EZDiYjxMnZd{+={?t;xcJ!L5mweP!`~1-u0O8(~Pq6#x zVj%2&kX;1+1K$OWkn^(yM@y*ErV{KNzk&e~o7i&D2W4T(u&x)2sVZ-JN_@cd1fURO zAA_egGbp9Pks1Iz$fL0Pm6w$|@JxcT>nG8lKimO9^4w(u8Ny`pc;dW0P~ve5BP(ca z`fn*CFXKpPj^)giP5hPPqTr=S>*v)%u_iqW;h1?mYe1r{Xm3y4XieHP_M$b5m5G6g z`Hs-!LZVF>c@$lZmcEJh{ipLa<*mD)1jl^HT+iFQv{~;Jxw6?+dODFVO1kun*~Y^z zz(p>eQwf5dtBTOk@yM<`p5Ym8=5ELVZs5lL-Q$SV2W+t#kji>ao(o!lR#4HegZTqW z90L~_$j2an(^EL-VV0W!2#bOPJRySwSEXM7VAdg(8Gtcx_b`tzL-2_wFn2Tvg}>E! z;pNEUI4D&G0M<|T)lA8>#wMUl^M6&|RSr)7(}!r;?PradQBi0yB#l<1`;R?=IGgI!l&7I} z`kp#oujL9GkAX|$NhnLM+4T0IPF>Za;KI6sI;}yAn(4^Q23MgzIxSni*IQ5)AV;M0 z_Y|~GaS4ScTTK`=ERtRj&4%(h*vUXo>RVF5O^LJ#5;tl2Zo4&HIEf@R2DAt2Ze32V zg$iG_z;Z1BbN6mY7F8${7z$oXV15+NVP85bZyu{jd&; zl~P42Xb7DR-33cDv{HkNG=5TUrfpVS2P`KV!UNQ%5P3GZs#seIEIsr&j5&wCu<<1~ zLVV9KAuv7s{IGkh=t%W7_Z_vl3(p(7F5sv+J7Dzbc}q(%{ioftYnyZ=cJMG;4KB`K zV{#ZA22Xt5lnv(#I=WEceNvs*rYHv{j@w%7G>@H7RMyBFS>LA76IPdwcDqfyhxeL; zqxvl-Yr&{g@6MRR8r_)RR_r(3Xyk6F42%19@8-ZvoT4as1t~E_`%^f}Upd=Vwe9J@wC>u@sy2L>BmZhVk zV=CQ2nSb-Vjebd(-wQ9So7r)tNp!AE?Cwo2Xqm4)oG9BhJHG=Gs>(0{lMb9nUAU{a zas8NXqfeXK)jMy;B{OI4Q7+%Td-lw|duD5C826stFyMhDyLb2EL#U#2i=wdW7!e$* zj%j`Z&s!UKj#Z#`V6to)E7VuSp69sCA}G*dRWW;umm0QX;G<*}5PhzTq1@BWnG}5B2(^g=|u1JvHHc=_)A`{LT zqvHeSq+PCFP~&Rz8F*chb8qN%I=tbA+BEmr%9ci7j#IED=IKFR+%U*0xc?Cbu zbxovG=hgRulT%nRSsNV{Hu}syo)gbCc#&io zIZ9h-w2ZNee2`!7CMa%&)nxHw% z7^5j{;qBlB#@R0nn+FDr6@}a_tjXLU2$|jln&TgpCs%pHl<6x(qEY5Zy;U{}j+C?J#nv@tE&C@` zQreTHJ{Gjk^r+;RRmvkDE13;dt6EVzdhWny=z8+|nV&9JzEL)R+_K^BgD=%gvJ;n+ z^yEp(KZ;$VGiWxwot1choz*5(I9&0l)nCQiYJC=ORY+M}w=iG!$v$hK-j?=(3C!ax zab(@)t=yX?*3$|xlXjoMs&^ZVE>SXAz~RE^NjbJcP-nj1TIkiD|MgN&wlvtIJh$Uw z<-O|=dTMIfMao@QL*ayW4Tn-Et?Z8wL*fDcs}Xb)STx?HBJLG~k z0m{*v)c@DQus<1(cuSQ@1tFg|;&m4ie@nU@vAENj=q?J%o}!4Ye{3PxAWM8s@{&B+ z5h--Yfq|yHJXwxKWb|^>VUHvnvQHTW7kp9KmPdYjhO*(^W6I%YpGULbdk<~@VD|$d zu&+sixOw})5!5>+kFIK*-DV@)(*M$P2Dde^(tMURiN50>#d>IJ2 zo&Jau?Tm*V_ORdUR;YN;?acGYkv{}uk|Pv!C>tV>iPPco`{QuN9u7LtdN}P21|1L& z7CyNZR4Exyul7R1j|(-If_ndcVEk}qvm7s_?r%dsuoDMQ;XM>KAedeiFS8(`3{7@! z1I^7mC_z*$0R{UW#_L?VG}vmxYAOo`Z@RfFIe8Fgp0LRv2%)U>d|1jkY+OTf}B2Sc4 zgx2L>c6~Co>51|>nsrq=Uwdn;bMyNv4?VVd3Z;EwugP!jTVlzp9eHAFYplUqUkTo` z$;+=Sbr%OIgUFjMUl0IGx4Cv{hWzp9s`y{#!d)L3z zKieNPdrbD?r~XvZ6zJ+6_v#tWQa>@5NLP=!WyP88!H|-7BSBPt_SxQXeZdIMBvigJII(RYXay7g$Fi?Z=HAogfmig~iYLin1 zRE9rER>={5KE9uMcjRtCG!}&`3TzQe%rMb$mnVnA zi~OABveHnKJi@s?X5;xierjaVrQmV_F{)vEzBMkcP#(MS@5&QbUxmt_xf`{tmSbQq z*0VmF#Mfp)p%Bp;bRPDV z+24WueZk^nVxscG%@7mvukA>8^BBXzg=W@sWjqk{aV{2>ZES3}nT4(Duv9D>S&>!_aA_K#PcPg zu(LqsBAY>X$5iQYi{p?rN`2M6}+;Cb0Dg^005p*>0J&sK_hGdc!RUp zN&!%MN;b|R0rm*60eMrQiM73^T>GZ-rLz654JU3XN>0-m5Rs}evE0GA)AkuzaO`y$ z&EV3dhkg5!doJ#&*0aJm<@n|i8-TQHgC5?e1($cu6woCtE}pSHvuWZhWoWjt@_}zr z_U|{K$ZNErcwt%wHX^f0_{$(_v^n4D$G-Ry((ahM#ets2BXKvKx%4j7271$es$tOo_E zs>PD=dk!I&`o_2jP7KFp*uWJ6Pr+%S9A-r&0YENbBf)45^-CZg1*m$6eej+WYiIB0 zD7avdxnO(b@MDK(e66hg&kf4ol*d=?Mz)8Vo43=X1Cm{U;wohHyt8N4+Y9?|gOYcM z^h^rKI{xsPz4!Ba)7WKNi0W?MSaZ>YI2D)~uJ&$hW#SUbcM%?YnJJt%d4NGmw&h z;$QR_|Ekf)l-o1TGIxn`(?^po zqaWRH5_VG<*kE;`Xg}O&|cAg!-N1>E|W-C{}qWrw^i$e}q-7JA(tek+5{-b-* zv}t)$yH}{Vp`_;MsLN{%jO_V}Z?2K=>K0l^(^%(6<=+)7_VC!0C z+asI4-^vTN(Vy<_>=B#+*{WQsTn|cUU;@D_`~-V|-VG9fs-cU5b1Aqg4YCbbQH~KJ zBZ`?UsR0K-cwHdsl8jaBIK)lH=~*KHUqg@tmb#h%C$n9%1{)g-yK(_T&uhjw#eu%9-yKM!qop8((VSOD zkdShQEA#^weBH@5sMy*K@t7sx5b@hQpr7TW9jqGz%_PPRSYET{l#c_jEjc6@&h!p< z%Kra-j8ReLsOaA$1RdT#ASJXo&3Wg3t27KK+9`&7h|=X`s2m@bQ@H>bstAui@%XWL z1zZ^O?wIk-H4N!gUQ|Z@v{^ZJK_x0eEUnwJz3<^ED=eN6JVJPoY z!MGDyJ<}`Jh`4HF)H3-I0hhRu^$Z z8WB{3^Q4>(fATY49Np$zCh<~V7!n7LoC0gJbg5UcjAa|Im|3yHq>F~?HngPNcJ?6; z?+hJu2J2()TAN$Q_`EtO9V3&wCp|T1_C@H3a`m0%hdw!qCJ?>`qerM8Rac*?I5fYw zr~hy=9rGu%5%g%hI?tKk7(#m;RZs;f6Ykfz3tb?1dC*RGLr8YkM$lq!bJ*sok19{O z>VvYg3c|)6DX(l#29O-7i@GB9kz?ZM*!Y`An&*mqXr2SYFh~>IR|oYvb^6H{TP@)k zx}qwlg|h2I!4An?QSQR=#!{iLFXf1KeRTE&m_^oLH&`@gv`QIZ@1pMk>+m$FeYpiB z?3bZ}>SvlCAvVy694LZHPz@S^u}{T107HK`n&Y>y%!~<+N_9X)gX|wU>5O4)u%cFN z7K1~M)Z63Q200)weBcJ4vr?f4 z)3F=@Qz;sF+`*t?Bv_OJe&82zE0q(>K^F!zu+V|V55pVW2(InJ(}E4jAej9c?iDmV zjo9$x;cap+r>YgfaCLe=8tx7<%$>u^-qn*}Xvl>;?m3e3gsK&>AL_5B&zP*kEUSFTrn{o#RTyYZHcZIo|X zESwQSj)W)CJu_8iHBOt_F@v!)EbH3XKOvasvjy`#8)BZYBNleAjpfNsupKIGc@AK! zph^{b)fbZd1!gJsO_@`0#02w$lF6PVTxLg1x!#c{TLMX^@_+bx@Ax+A`+wZ;(>kq_ zbUNv@_m(W#*7lz9-j3}!j_nL**x7rK0fdAR2q7duLLd+zK!89Qg;GWdWkZ2dN=qqi zX(_wV_QQ-Xzt{WD4)puS@A0su)7|NPr+dHF`!$}gzlKmn@UhyrK@dSh4x z_&rZ>Uo{>5>Po6ASl;E5$dpWbv?lExueuo(zL}ml{`Yx>{S9>5?>4Qk0ddF9Q59UFU>C1QmZ*9_cT+U9@}7VE)1Tx*pp-Fa6XH7ju^`+DSo{aHdWo*Un#O zeF{00HoeJ4O+X`Vi_PY@5u>ihik^8hwQRy;CLOffp+&4s$Jkt~)2%m9CWG4ky49lx-wJk?K^jR*;&z>s`e1`L2L2UtDBIU?X`ek5>-@xmF{ zX@)<7aK#y$3$~{{Z2Va)<}tj5-w^A-X$qM6Fupm&RG?Tr%L@s9Cw`fB0q~asDIw^D zX_f{)6?BjwG14hMSc%WULIt!TI0fV_zOl@;*K)sa%k)K>bFQ?p&}fRLdoo79#-UNW zqN<_Xl9AWnL{?)$>}y`vdN=emS#$P56gz%6 z-E(RIx7?vtrgI&I$(8!(x8&W&PptM^msMCJOeMgBckG@sq1fee*-6Q8X||D6 z>odK>wTxeD>Q7d5Y+=kn@Kfu=Vke_Ek}VOtO`?PhPV4~72i#C-%UGZoS554?%FpCV z76CS2SK0{Pz?PUos*p6S9zgWw(`XD#rscNs?oFdv?t4IB|DIbqp03iuatfU zh#yS_r>9LHihyzvo(_D1uHlyiG|i_?U|SED?o$Lo;p!r+Cg2~7d$3A;Am(wPLm+3~ z;GH&Yu%DNAHet3oPyYOu1QO&Eb9kDBF>ul*Pq#ev;nC0Ug1oOULxGN!f8oAg_~vaJ zKbqLVosE>Uba~C}_MJPnzlFjlu0&9!jS+>=^PH{6nv< zxcv`@k>lqLq1j80`MkCEKpE7ON-TPRP?XuRf6==AHG}gMX2#1XBdu$iI+iBm;F;Gb zJ*?SoVdh$_fHHJJ|IsTeDj@a(FcU_P)DiJt)+zpKnS*)GImZYV%t!tufcFn-jZck6co&^=PcWhXho$t3Y)=Gc3YtE6d zHyAGbV{7@1>4AAA(TJtkQWVJBHGZARz3$Of1@eLq&&A}m5F9Q71d=8`BYHV_2zM7e z!!HY`=S!HFAU}Tuo?~GZyUsvpwxA6wbjj z&rh&WoHnz~=b8uAAYhxT~*JZX6yWW;??)92juiL_U;|V+C2%C)o(XYZLCS;VV8ogl?8>Xdlm)%ko znBP?9DK$dbF=H|PW>#dS%MxzXFis}w7vJ4)bDN{7!mN&U?>V{p67ILp523Dqq5|~N zn?(t|_o37xqNOQcmEp$yZo4(zJ}}P zSk_gEE{S^J793`z4wyh!y3PNMP!g#54>?W#GRW3RK`+hAmQpHGD$$3{V{xar1fX_I z79I38FM*~QsOVfh`K@#kQdt&2A};n7p9DR}10zibZ~=o78Y&S!AI{ET98wk+0t!n9 ze%TM(Kn+lV9+&aCVmODfitu7;afq=i58^{V8fSvXfp_^$3l(83$lRS42Y7Hus}~sm z{1?;u8O6mPz@moy7E0wLkg~yX75^ZL(aD?9auomh!Xw;2xCbudF5#rKJDShb=X?OK z5!HLk9WIgBNqgyJ-9SGQ*=F5w>pW|*MsBy*D0x`n@}p?o_Dt=Cex@{eeT}22p~@b& zoV`z*@};(!=-n#~a_z*#~jmPV%5cArf(`dq=@IIE(#w;6+pHCb7J6I&@a zyNxy|=uxQRN)mm?b#XtujgEeewi2_tzT=KOAUcAwpEgXL&xr&>wfjTL#Q>_>4CE(-O zaLz+MDBTW0oE*pk=w5+mJ4M4fo?lo52&if%ppHmz+VaIT)20iEZQg?f{761{Cr5I- zo`CG6pEsb$xBJiq=)r-IZP7Z@0u$rBbwbMw6QEZP|~}j z%-P<2BCIFveps$6l}M2-6;%f>L$`eGYw}6}#*);Cz#C*1i<~c?spd+pWnrzSlr=IL zpWc^&q)aHHW4PCo0mh?ssI?YWPdK$c4;7^3G$nFMv|f+S2iv4nqZi$s*x0m*`{GPK zw|>j22_>WjYOVm`C`g5kzOK$)O7m18i8YFdLyWA?0 zMb>RbTt@4tuxJhDW^O^bNoR4;CKc^vxrfSG9q=ky>oub5oW5Y5)hZYzqbcJ4X?I4- zj2d7>Mxj-ZdZ=4ieQtyNQKppcv?XKV$7p{O_)`{88=&$cuf7dlFt>93L^JEmHll zE~&)7prTr#eQvQGx&*P^JH7EvSo&I|#>!4&gn!h-E@zOJW#eSLz4@5|{ zk0qJ7CD|8LsM!7u&Mnu7iozm214h;t;5xCP&rtZ=yR5-twR#Skb(5~KnF&&?z0yTD zg86P#Q4U2U7wG_)&=o*3K8L|5)5y%wD@bi48yg);TA}du>m|id740n=%M>}dold9z z=M_PnACTbO#MU{L9ZEUlDvo7t4SMEOw|7MdW^g6l~>W6S2w;ollC1j+J^1g)q@X~I^~ z&XoqJWtUz0(pombw#-R)R8&R$W(6r$=)^KgLWpG^FG8gyS1j@M7wPFxPxKD%?fdUW zq5IIiT63dI@6s6KhT>9>t*BjIl#_&9YAHamsQUB%jk`MMmRd8HCLQi#CBXK|5h4af zNK)F*wFiC8p`w~(8QKy{6&EENp`nw?&pqn)cFcmZv)-6LN>mstT(PF&!s_N~xVa9Bhq=GWICqVDv-RbyBLcBqSPZhksda%D>*$!`=b=0d5s@ z_Dq2uY-8@nG6ZWeP>MK2JH2;W1VHjY8Y0CwnPcX_^b!z%Ax)mWmHYeU1o8J{2S2@q zd;K9;Gy~Y3vf|WbbAuwBKs6qiTNG?}=F6JaF|+K4{c88CFB%lADu^m>e~^%i^{j*} zRI9B@r4jJDjv>TEykUbx^83Qr=&U(X3DdH()SR|TL?lTP7Heb4Y_CBfkN0!Uc9XNz zy|QX!%V=foWg9ES{-~r=DD?{vg`q!Hu+p+Cw0`2+g=vjYvXQsHSJjvZ^(rhZV zYyETk{Ui;Y2WY~wHEXxC_02X?rxewKLwifkuJzTEEqanx5R*hJciY)=6)PVeUo@&I zswrUxQRH{}% zfB+!<#f*fUguFo69c-+eQh+ttR?ax31Z1~K%8$1DfTKAMp-J^HpY}%nJ=s6xXmM^eD zXKwM*8ogoxgpI_2p}kc^ND}SpNM{u0p=qUt4XWm~NL2S~IznvBP&CJ8TE&D6Mjyq< z6*3w3>^1iz(c>Gr&!67rY|Jz@0{Tcv^69VKb%lw_vShqQtc{fVhfg^edMs7(*>!*v z>Y#!-7h>;pFp2iU+|Sabi*(TLp3(%DbeGS&%faYJ(&Z25n>w_B@T6+EvA=)@RKPFc zVtSB*%lTe(*YPRazbp6@q@Nfu2pVvrdgxC!AKeVLtpQlfh@}&}88b%UeJt{(ErnF2 zEDN38csa+i!0Y(jX+a1Q2X=sD!Dc&VaHvEO$w2A=I4aNJSax6r4+a3770U}SQwV5( zia&sHfu)Dk%cp|Lupk4G6F}Ikz>E${KrAEpz{2#olz65b1TW;_+Bgh=>*FDVmls`d zJ_M(d_^#l>{6%2MAU_b)m|*J&H`~KWe_$H%UPusMX%Ws6JTEzAdMpF+uYsZX`^1<% zylI3l3l0b9IK-ng!9IK(MivAfFM35Y5*Qu?wvhx&>Aag_#tntT-@+x~4*92R#E)Hk zSFq4gYA!U!j0=veqLzkP-QsN!V(@cc0c>)F`#jbtisu7G&8c)^xQ)`;sE9?TbI8@j z_R{%QUwI^&dgJ!B`CQs!i`K2F=u7?irqP}0Mr8aQH5Akl%vgB zL#apdTAK}0v#g2YHzhB%AX1qLLdA1 zh`94v$RO4EXjj%Q+F>&SV84f=E%ve{@1ky(J<%3s!xE`EXCWd*mLf}CO2vJs1&bwU z^`|>Sj;h(Vlm%U}q_98PJ^Q^_m(S!F_V{c`o3pKf!Zx-Fzz%dJ8kL`NMmn2JdY6)f zk|?#z8&^r7nT1lM5IGa#tuKJvT`47Ft)lX%+1m8L#0}ikp#zAxsdm{sy2f8x6q2I; z^p4iqn+OSD@5@A_E|(A+tTt!bZcwPz05%GEiW-GGm8&Z@>6;f%U20xgms{r`WAjsu zjii@e*eI#p5>>f14(}|#KIK(F{&u;Y&b1Q$?no@G(6xA>pO~5Aij@$Kj$71;hUV-n zr!TPSon1TL-3V>S{eD%`a#f<9_&Dw~=i@d<_8$qwQe2I;LDR&Q{cqqP0FC7ca2VzWzR@L4d;l0=29~el0KsRC!5R1z9E|{$6GRT? zK3HL67RAgD>^B`R7g7TSm@0wUrniLy2`kymaaDveHdiZR+09za~sdvzzdkB{H^)j&KC4gW~Vnl*n}_5LcAN$oUTJqvQZ ztu~kf_K^mc+EnME*y|fylo>KGA?s*<1Nk6y7l76ta#?frwPhUq|;dy~0ov8lCit(5=V$|A#zLwKthc}hC&I%Rp89dN^bGIcy%H&Xy zDk3P0_&-e=O6rfXjJBasr;;z$({+tN27xFv+tVgWuy* z_ADrlszCT-p#zypVDwJQLwMr@OT4>QK-2{>79c>t3c@@Jsgl7-LpTU4HZZskY4B>sCK0tWl@&zdSGt z9z!4im~)tNBOpyA@x6i2=l8>Inua6%y=1}R!l#Vl)>ORo1vmh_gZKYp^vvmtW2FS^ z>r_SpWCu5f?_lP>q-V@kvZ;?PwI%>|ki$|`l5wyfgRvs@6Gb&ndw>v!j$IDWSt>GCrWaF`$|{$a z_|O+3IpCo}Uk$yjT&{{&(4ry&Wuf1}ENFeAiaIr#_Nh0BR4MdDkr<3Q2Q7yV*2)r- z+^f-ul~#%=HoM!U&#I}UTP2eceZPb5i)LSUYqMOZI+$-S;=1-7WXxvf(0bH&;<_G? zaC)<(1=-iyw7RP9v@fA6-(0DXY8Uife5S$URm`8AOA^JSMHh)n=C0|i?y6?m4|bHT z&&!)q1oH#uKyO@2g-e=jX%*X0^G{Mlp!dPS;t=%=L6=Vuf+3^9M%HX=-3>*GdbPiQ z(c(a;C*18v7f#MU5aLWRKYkmz>w0Br_sB!NT;EjfF}KwM0ZBq;C{~sA*3LP*Qs=A>_H0QE9~ij$a=o)b z(LQ>$@qc+so88@XCX(4DYSFkdp z#%?mWX}X~f5&>ovS^Sh~7T|>&{9@WHwV=)K7J4amxZdl6enZOD>G6ui!$nmi;62E+ z9PK#6O~v>1dbs~K`R*My83B=4BxyAp#k#=-j1IswHn2m)#~xua1o;Ys zgO`k-65(WWxY(wTx}BxFYsZgtZ#oU--p?&=EDsyjX8q0-YaS@Sw&M))ilkRPCA;8Q zNyMd%P1);8hq80pLpvT`jA||%p4wHIDji?fcK4;tx2-SV7-#%>m%E(!dvaO#`jhR| z?%|^g-|#!ZX%`xMsCDwy+0a$!!hg^9_d(qn`3hO7(>a(?2WU<&itV{spAWL;#n8oJ+}FM05MCNJkh!z zwuCNfd0=(h)!yzoLk37n8Uttg^U?<(KsW>?As0h4nU@RxSnvtd_z|#Nw?K+r2~=;l z@QILEl(W2(03LaeUjg7pF4*;WMzFgb9=(v71kVynYktuIjtjvqm+{EwFnxFu1x|p* zg}^`HcgH(DfcXLYX}mxv_(@J(5}Y-e^o;LhhE2fX&MOd@WK0*>;e!uhmqs=VPyrZw z)yp3fZj;w2=)6_W!;8%%iu1Nh@_$NBB{LKSWRqOFVLJ|@;&k5)ZP zv)&Zoc{56lOktH3y7jaN0Oi!c$Y*JQNtZG<)(q(d&<}i|3VL8sc2}xpWH1^lLo#>m za}yQ|K&23o-q5Cv6~r+_${AZxk*&NnH{x=;lA7MAgW{I3tbw-C8WR#RE&w(F8EP{^ zh{@#I!t7Y!yBrR*E$`@%olxcuW({O(zGmn5QQ zmHN=b>kv`DIaj-+dg*3yM>^VKGn!xlZS9$JDfg%6O4I1NsrOYTBdJv&6ZoGX9A-3v zJ4tJ0(VMj{@Yt#>BI3$#dS|OW-l|7p9hxNm` zP~HOuRFJ~s@=I_dGD5*Fm{q(YlJE>EaB>=xG>&}1S9zO@9}l<|^D#7q#e1<(z-~wb z41g^nd^Xl6z~C5NGOQ!M`6P8mikb zs(%+f^F-6>r{e9&U{223UVC|oV-3(}yT5S>%RSi^AO7+Qd93fnxv!%azxnQkM0~*C zk75f6k!5^vvcGIT_oTHiA$j7Zf4FUFSWkS#U7rKly5jUMiBN*5&KN zhd(bAOCPV^v|tOV(S@3Pt)7m6ckUApm5|b_5j%pWDk`u0)zp%v`!6(xjgs#2c!#{}LUmPW^ zv}V?2n_@1DMJFfrx<=AEpH6H_T1Jgg7F=uSANh$QnI~#M$UWwZSzN`$)T_F5S#w#x zmrYsoX6TO|fmLvyyRkrA@a4G`@+>qT+fi^OxaSMOA&+N#W?=%`@CObiey5brQBHtG zj-|8kDuXSaU_szxvc(#LR}(+J08#R52p|>y56C(>y?z1e6BG)+vh?$mcixwy=SdJC zVFFT2l~|qdL~H<+f^|EXZg8jPcf(cyD+J-~{BDC_p#l5wj3Qi-r(^zBd4k5=hHnH8 zLO3~q7aO2+#J~V)xEok%!PZZdS6ZHJ9nX|N-l5uR7R3VEyhju)Njk}8l@iOM4tuj3 zK&&*T{+K1IrJ`(V&|;_@)rImRyQ3;59j+>j7|Irepu(D}^_q;Avr9>(wHlfjcqIwDHGs-B_N;*wO8^{PtG316WfG?) zCX-3l)<9nha71e4Mm<|%R%xP?r-7jqF=sfUgvMzqo555kik5rL;vpiJB1wZ1>Ye1O zQa_VXD8RXF(s^=9QmiD6c0DvQ-`XH54s_|8DkWNRQ3L80e{WSr=4fP|HR3nM8{AH< z(p+NkRqEr_+GvADzc%NV{eD1sE~Vex>!o0Fy8-8gz3#0?IiRp^||e4oncL!v84@DFNtDSN0xDg z&EhHpWT%R=)?~faduQzYmw!QeZI1}o8-a>b!5k5yfw%@sr4xQ1p4 zCv=S@O?s77E{|Cniild5LT5JAWGzZ1L-koDda40B82(4w6_HAt>;M;Y)48SOWzwF4 z@q*V7pqyZ9fzKTUVw;99<*AXUJfOgzigUqw0lEiz;t7m@WqIBJ*)FV>fdqLmk0}(3 zdC19{HhXvxjQMIBA|3`0czGriSOL~9I4sP62Mk=E#)UV*CK5Ir%yOOXfMc1+w=6jWHYMkoeY$YKiU07KrLj5x3HcvkgwD3RA3>9&_ur92aM z&+d@jUSDF#TH?O+I;l-IkNbrC@Y{!`?n0K6mtLZjo35{XkbCd$eY@VAm|L_xX0Za= zed>bdrk%e!*!y@5`t69LwAnbZ3_7yN^sKu>Me0~X+ro9LENN@$YKSHgGBu+a4wf{< zRYdWHR>mai$-0ZAxj7kI6mmBYC+y?C&_E(s>!Tozmx2O6wSPsBwJDUgF@|UQ=XC#1*Y9qRVVaMnl{-XLfQxo~S8xJN+tsr(R|4 za^1`Q;gh$nMcV!C9)--poxA_wT@P;QpO^)GXObCbuB~(Yz}Biy67VUB5$;zvdQMT_q)(~O#Fqaey{<{?R%_UA zwpbEI{qB}%mBqEQ#=Zi2!?u9FP9x})s!#5!F79ifG_L5f)_5P|Rj8P{l_ga?{VcY7yL|$PzFcu#&80$+m?j76{hK2G*?x6fiz(hzFj6z!9Aweqxi=Hpv!Vg!# z{>s8L$LecZh4G?fTEG|}o>amwg$4L3-UY{#0hS{(K@urs>+z)}LUYj>9V{4txMmC& zi1360Gmixo4i9DJSN{HVvFiI5H2N2AyZ>vJKKoDBTE5U;?x;)DRX#m=!GA6b=yf4y zBL7KGzV+MVXeTPW`jeZB1|>0%vdLdqZHQRZ45@|g!DV&+xNj(wvAYzJ<)W*hbEkp^ z!|K+J1QWS;-gP5r8?YKFmxPag@Cu-TT~@CGcsh+#tcw2{EkFJ=k}MXD^_^^tM6To> zsbvb;rX5RGvk_el>OB3il?0XKsMZeylg(848jX zPasU2fUN_?1Jm|CUg>(l7y(n?ALLiuU|wU>8@7W1JkPLH=Re|**~|%Gpvgd~@W*5Y zVmw1IGNAZC7fjwO#f$ctsRdj(VM6f^A=)>?wqh@W+X6vhG~#Q)2F$tqs`5y%zBQOn z8=wwPn~yqmLr;FzRQ&mK$ERSnBxLiakB!z1xGoxyUW^=9jr^?J63p2$sO#=neC?;) zD^@Tpm>_G8a({t|wI?B)z1=&@15JPZjede2@K*Yui?P4aBcaY_ZB|RLBP7#`<;w5> zf$YeB;Wuc}N^X*S?S1aGdqn>oNJaZ1k?v@6;FZPep9G{3?p+&;AvDPP=h329-bZVm zDU~VJi9d4>KSq4ZK*~ zZD#VC%hsFIDm9oSI^~knQ5pI;CC!vghw0-ZhGNS6$BKZCA# znfr3>*+=p+S-!BOzA8&NL+uPe=O{?k?GLWo_Q+9{Oc9zLC#IkYnS?Ti-W4sji7Ayq z1xEWLfTFloda6KQ-~>m{D9{;Xd;+Ws)c-=NH!dc_V1Ss#VS=$TXBixG?(X!D ztWvgpHe0s(S%5K!9{E3X_dDn?Gz@+Fy6rIN&Ih2s%MWk$z6NQ)i`Ly$SC!1vRzkmX zX=HV#WBcst-R%wgWg54B$?5|fAQj2}lCN4|S!mz$X0vkdQts_PZYjSOs$bQBQ0D`L zx_!{T5Mw7U2MnAm3vPf$G7kc8zFhEoNdGH93=3ICuu}G6PZteI@-8v0hgb{GvG{3V zJW60HcqYKS^mOYV!5jkKgC`4K-ym-|VSx+>0DTqMTli`v?m)vgnh|#JzBD5%hEG9w z&{#d<6A1on{@aGkkHR6D{mFI-d<)->aMn+KQU9}l<^;GS@+}l!dB>TT53lBqaUcA8 z>0BsCT=5>jmLC7qyWxf-#<*W&g>qc82|}6W+e^27_4dsxy$>SOtHr62fP11Yo>`GQ zeeUVW!}(gDpXrR7ErGLN$Rfj?HU<&iEA&n)_W-3Od@cEU#uBhY8>^cO^KNs(T~nL( zrWcGwO{RF#&U$W*I2m(TZ+*t>p-pZNLy}XQEC!1=WU|nxGGJiMF52oqi=t+=HOw-m zsZT9FfcAEdsDb?ebl+Nyv;_T5wCjKOqZ*M_*#3Pzx#; z`Rk`) zDw)yHc&G3Wss6B{&6ef;0wD1g895lISbR}~x=9Wqy=6-XM(oiPVowLAw+BY>z%Cc>jIRZvXt9lw0KPcDf zjH;h;ho-n!r^F+2Go`dnO}*^BV2ySS&D43rIRsTk5U-<^Ov$=!gJ{u<`~5n~TY6L1 z@CcyMBMD*g>c6#^HH8(vfR6H&)-I1whE&}gq^7l0q~*oJ=tt*@YZ}%QRoy!D2lblH zjKAB|?dS$*Ps+K>5vf=}kbpWduqXy?iP3@$4LY|^%l3Vn7M%i=4OY#)^CdE>!M&j3 zK0c*1>A-VvihJZ3{=KT3TlKVi=wB`JLRt4ka-&@WNH^3YsO2s>K6=-uUvLk73B)4Ex zkEb5GkmxKT2}we3K(7CQh^FRmt?I1xcEy`A;RD}(e#m6&ob>7uIHTz~b|#P)T^BG1 ziWFLZ|2NMR_QhK?&SV&uV1TFU(M|({k2uo(bT%7rSSsD=97q( zI)##n)faw^#=TxkGiu#?qRvDa9}FzYofR)|Ds}0&!Ed)O_L@Db0^*&afw?Bnw$Qb% zUWZZ}3WE0i6}Ml$SY|GmRWO3JF;*%#c!Db{FwabPMnA8Qc})x)cit4l&k*vz01Isz zhZZ*vhodknGf-U6c^JlHS^45ux$C}slk5G)Wwt68{r#2=J5C_aJ-2iJcktcs{(9@G z6B}DR&a2kWnmqK}k&S=7ocqPc=eVnWfodQABf8?l?}z0UWu~9IX>KN-s}Y|xo%(ERbOsdu?Q9(i909SrmVXVu{F;>~;NxBTCquQ$6MdFT6YOe^L* zjp|8-#&m@H?zO1{vaYYt-A~^B*}cmGW}`74OPE^9`{rrVF0y zw7xXfKCng*W{mvuGcE}jyw7kYZ(Pu7wx9S4u~&VIb|Y1)(AYHb-Jn??WIEny9?baf zE~=@U)l}Oe8k`duEdn4Hw$-CBTKyWOdEVR`xwW@F!5w_D<3f+ooB(v8Z&AUoyFR}B z&(A*1{qe!#eT!VHue@vX*b|80mjCVxbo}dQ(At&u6~!xGxqR08zaB4N9`J4A?tO$9 z4TgGSarKF^)Y8?rE#9)+-{HSx9IWg9lSu^qlQfjMZclB`BP$obIbJqdZ&B-LQZ7a( zw-t{!F4#UX?s2*hI8+0PRDlA?z}j}p78e8yQUwhK-Mq-W9J1TrD>w@jEikDV%x|3Z z0=;Gn{23^_qFF2xfrjxDG@u!V#}6I4o=-n~L22{aT$l|AIrGvJ&Jv!hY$imC1u`z% z^!fb4LkEiigiIRS4KRRSQ1CdT9k###fbn>*dm1nGgzG@GiT7JVk~=Smrt@%s+xVO3 zz2|VVfTAu$YVmH&jW_@V!|_*$i85)qI~q0KG4&@d#Qn999Xfp2djsl1;yfz5<>(WJ zWPILQjG{Yrj8fPuY&vIPI|4jSmsUl}O>#!7wmqXJ#)Fh)#l4|fxHeIVVnG}E}=U!~14QRzow+;(pjcllHy_cybt#98g2OxX+f zpu?iG^$e|T=ZdG+aEIMh(O_663xXn{jA6qmP&yKH1M;3gfA;OX(a-FCav}JomcIWU zpiEStYtGz@in+&ciN!~^9((>KjF@j$6L;$E{T`#SEQH1k@e&tn2xyf`+Wm`Oo7J0g zIm(bhUC6Mmri5BeD^%P-z~i=dHQT0!Y!%x#Xw(r$$-YGo{OrulSAQH2>6)5UQ};<( z8k&zaSd1D{)m9GJ@M>^P!0%CzX5V;)D6qP?vsSDxj^&U-D}%IaY4aT*ycH@+U+qD4 z#KNY!``1dX*&3HoOhhXdO)Rq&C<=CS7s^inuDlnhc?1lABLyEqV@*HGpt>156`KPX z=oVxy*nJ>PKq&aKnD#Jzfys%F2y}E_JP03!_~3ae1Ue?BX_}W6d`C=(2MP2oG{}UL z@gUQ*j{`3_y#`-=nsUL3fh84u2k#KJrr(4Q=kJHVE%=1*|NI>qY0QhDOGIh^bncng zbN-8hECdG(_Qya19PcK>w~Xy*f$;Gge-v-PT?oJ2LHMWdoL`#B_i3OrVvW2i`u#BN zrNU(@1y^J;kE7fEK@|Shl`(qjHT&gZvl%-5&~}L2&~6R*G~I=)Ua2xTYu%O$jDTCK z8x8ztu8MXK%t-{fk9|JI0zOfZ+v-Yi+Io$$F>C0FY05ftw9?l1C_@&lBRx@9SVt)& z6eL5MtI?&_4C%9~pu(@XU3m!=bdXvZ8;*J_12!gVAgxtOwG`?VN;KiRCCmOHQ^>&a z&OZOn$zrr0RoYDvtw!lqxq~&VCJi~5ltsmekf!O%)kDh{57@LSr9(^hmRC)r-R8K_ z;tN~|%oSLlu-Ge-%gx^(QWf@96{luZ6Q5b)P<^j_$DOCAE+xfoy;gKZVr}u)J)@_G z|J0*ax@}VrwSuaE%w$0I1~3M_>C_FO+QoS=1?h%ngP;J1Nq+N|UXy;1```6MdI z?YTXR0`A^ITgnNbp3cHXXu+;^ml6Rcbo;CzREb^Ed#EOu$WSX;!FdTW}#7;w;D`mvxinu@tipplV2S^zo6iNfIqKD8b@7BO9^5aQH=-Gw8 z3~V6u-2}d$$>f9~AnyulpG5po{ywI8g!dFrvm7?bejE@p5C$L0;$Oo{6c`-_ho1r~ z2q>U}`-XSii+;_%O_eOU40)~_wu*s zs{b@>+tl^Vu$(Gi*i@`DOHATZfx%*nJ*87SZTcPvxayf*joC@gh2(@jh@LHJc3i<)T25T1I0BqF0yV{OX>ic(@x) z+(Uo<8&|QBl4-z4X{1yGFQEASP zfJd+OtxP)AW?Bs}2HkE_F;t-@WFmTw73YOCS;utf^3oV+EprWM@zlra#9jcQdzCK0rpos~Bm&{Vo>zf*dpNKPA- zy2i@k!ZOM z%Bk@y0ZWFWMtc{JD2N7fkS+mEk(Cn54@MESZpKNXU4F-kY=x(8OqkS`OVu6;>0yM4>fCAgxxXWvgsD z02KfjYqAgAHWhh5fl$eG_Ks7t#p4&(7|KKa-)+*%=ahELBUFlkYFKZLpBs@sBV7kA z0+Zl*YQ@WX@HX<)KC>jnO@gor=HW0QffAIR3qroUlPHshH0<*%D8)Pn-{pDana{Zo z@4cS*h`Z(wPjM$+0Ly6L$pc8e2g>qp`T8$iwL`5x9Btlncq#WScmJKwU*0$H!A;zM zj!i7seqe0=Rn6lyH4B<++SW-Di%#ynd5$S+d4W6j)FsID?6+w4v98`lPrR^uaAV}` z_q&(*xi4>6draQ7?zgK|1520oj%-~XZEnw}$||z3%zmA_N7O}mJ!8=zxZ{LhPjW?( zbhLBR1(CdczH{gGJ{?hB*}TF%tNWspZ9f!h%|6^W`t(7ws$=8G)>NjcTzT=L1he zpo8o9xny2$cz%6)k~{GX!TomX5iZ?UnR%!F;qspQN0+`{boG)4&=wf%^@+kjeZfqNE8rG=!XpvLVtTr zB7;2r|33hWACSldh*s$1h*u-H9Doq<2{RzL@ssBlH!QE!SUj4LmI+57sTAuid;v^n zl9`1+uRrh}7P3ZRwF-&VdeOwS#R_fXrRrbH9X+MWRKQ4gs|P&6mlPRA_cpA_DagZ@!HjkP}_gpQothFa5B@ba=H$WnQ-hiC@5dGSlNVnH1lC8KwrIBvQm)zIbbd^EHcxpl= zmv>Lxprn-R%aW&>g`Ks*#Qx3&wsdUp~>Grn<-V* z=xCKvQ*;7Vp*Qx1Pc<$(=|;3dtF2m)&Cc;ukIm2e=M<$E0Pc*Ed3nQ&pdP$xWhmYd z-s}u&I*W>Gq#~bl{cGIeuU4%72z5Z*LkgkeUy@(QE-Q!tCT0uo7`Oo{e|V>!uw=xc z4t`#zRpxmPpxLBRY~a}*809?kLn<$S`t%N7cViP7{0c@u_MfIESes(aE=(r4k$+Uk zJXb=`Bw;PVYkLBSkC!;)7f(O9e)wo*|KFD0!~N`;(d)Na91?=~>e{=%s@ZXGNbw=^ z-w&#F+XYYFj!5m98yeQ_Jd*dE+%|gO_`i|Q?XFsI=j3(VIj{Y_od@10zW>v$YaJt3 zTy^#AJBj%geNf97tS>zj|2iU*N^+a?iA{N_tmwnJ^Dg?==H5pi;Wn+QtIFgLqUAr^ zg$llW{`&e$=7Qa)TzcV#wc%f`yyjjThz03Y*0!`?oA_cE>T%sU;6E5L3=~6sCc%0!}ZR~(26I!LoFV$PN8PJ zozclt+{u5fUj2`Qec=KLVc^C{TDBS_TD+hXp!~-`t}*avft^4+`O-lj_6ULz1ajL5 zG7V&zScaRDU@}k?*8qSo!OwR9L%72pVVuU|; ztL{PNHyuZ%53HSE`S*j@eDJHsFIa{8mUnL^O=j-dkH6y{QLflhmuqRStlqb%I*t}1 z&*$jlM`P`C+jesIavL6djeGN6EqBB*uR7ggb*eQg*^`d4N{6|+YVV8O*pp9k-(9o| zNq_Y;I{8#Qlb+hTcw1pM@&Wfp#QqJadpmcAJSDpwqMn`bsMhoCZwq|J^pB4WyA=s2 zV;2R3kUx=vXehR61tRB7P@V#%1t6HfaE1MFlz(`DmIi7XQ!dzw{4)h=hG;m(2=;-) z5u2O%L1(d53{M|+WB`8iyr@h{wh?HU`|g7qKCcJ|+k!P_LPJdkTgj>WAYgiU(;w%r zc%_ha!5mhtEV=8^j*Ga{cXt}6{+#{riq6f&&b+tvz9s+W?s%G^9wUBD(K)SqO`j#Q zs&Wv@m40u~U%);6$fMko7YrR~T@_oF8Tjq|{Ya!geMz{^W@Z!DzdBVjI9OgWI9T!B zmBi!)b=j8o3R@QPxHYw{@wN-@oWIsnEkj>y-Fz*j-oDFc(Xev4y{p8%rZKVXYVPv` z%l@!OY_Ykd(%ga+sYuSUTbjPFQfsRGbv;g%MrC(FoFPl=T`sq?Yj^pOH+X1ZJ2G2q zOvxp{Ra!$E_kK@VSr7h)X=1p&@`bYF(0b(7f?pJT1MX21b}Rt(7D#xjT_A7lp~?ne zmS~)5z#}j6WDd_1;?+rzFR+k@N6kO^ETBSwZvkE*j04;O<-jZh+1i@N~k-pvidJNX`HT^MnF+gAlg` zueFGUv3``GEEawEBS=ZOCrl`aPxjy$k3Wc!7=k~tt*n$9=RV;6WL0hMjuXgxSKYqLR;?>pmQza?9IMlXn3~E%J)vN8U@!rR41_{bcYW`n z?|TVwvyOcZz#3Jnl+tpdz1Xjki%?l(pN_tKh^=-fdub>nXv{bz%#wa+GvSX|+G3ez z+d`KqZ}3!U^@i3}XY0-oYRJKo3|#P^y}#xDJRr6!AA4-7wX`zT;%kiPL$p*RF28PU zqSlcOT1`3?W09z(WNp59twrN7Tf%+OEB|lr62B%Qotxdca!*}2*Njhjcg#3YiujGmM}D{(gcy@ciT~JP<9&GUb+`mUg0Pz zN$fi=B`qn)>9=J}(AyqatcavZrPAlMYjhWJ*KhdfwZFf)5`{-R#Qk{&k_L4y+T)9L zih4G>B@-FwWLKChX{1{gD%pI(Q{~q=9VM0Oyf5b$_12vIKBQBrYCoB?>E6Moo}Bm5 z;=m&3hWbbZV9J$JNhnfPeXzORUvAR{x9TE>sL|&yge5Cu0*`=Z9rw-EmwXZ-z9ySA z*^Jq-AQg|(=-bh7RfI^?iEWfsUm!t7ZV5DBdH`~RP68&d7CEs+jtQEVK|m%3ErG=g zPdOQk4Gh#N0m~GxWYhSS7dQebX2GM23741T#ta?;mbO69*r=K&JuKj+hXX>yn?Jtc z%kcpvfjqNWJQ`1)n6Du3s018uh;{jBo#1dt1*HKPP$ttsbrm13##iBKUT6n~wF)mf zr!NZ^gDrf0-u(tT9@3p0Jb(b+J8dh-ct75>HBpk0)3b7fMkj?H1flD=r(cQoM@Bw* zTgJV6Tmk+YM@h}R*g!y`He5RvHIf!fcid%@N+nHofq_Y>Xl{1+nljh+ULw6J?Os@C zRoIu7X#*4n{s2q}w;|$4+gqKCMO|6g5mahP??MCL(=-k!=YGAVdG#{S{6PDNyE7Zp zR;5)93SWi(?49#LUb&MJg@|>?RT7y7)pdNjJ~dvF*|`E%o&@fphxE4r&>-PRGT>RN z&c~P3(^jU_VE0y7EvERMi?pl%w87dwzIGeeq+T#kokUCvlxILvw^{t=pr z>TKoC@->BXA799Q%Dwwg8(?42ho&^ozrA8tVr1S>Pi2zS*EAV2K}k($-VG8dW3bDR zSKC0?+lNG{g>7nSbA`L2%s}Ry+K{HONDRrgG9779EA=jEFhX>cnM!SXC$tDtXXgW6 zt!Sxr)h8|Y45?gYVO^o&yVYndYDB65d^mnK(aX*s1Z?OMyHf0EDwEjEw_i+wjvNtx%3Lrog1sW)*OB6-K($J z_jl7JHDj|kUbb_hs6bV)lsh1QN)~_s;G@6-S3$uWv4cwBkt}{=1y&JG0n+Cg0D5!6x`Z~s zqEQOM`2XP$taZSN25S>o0fIsj2;M=^*iR)mw&8AYJz5$8CjckIozG-};~N96%0kW% zzBqgZpBey4hpS+|!NqVqcR>CRd=-)c!3ySY40BRO4|k1|`tX?G+~RK-egg(R!u*N3 z71BKfrseNK&^i2=!bRX3!Unv`{K8M>`@^u<5hn0GzD4{+{3Eiwn;qX0Ft*^K6Fy9j zg6|NtD!zUGq4RyM;gc`}1_T>|7-98%9U$mjXaR-=i<1E53~&P;8Jr#D#YK`={6L0y zyq}~5;JR429$X2YFu3EMn!R7Lep|k3XU*u&IZbv(QXIVY?ql3W>E4le}e2lA$_#Ap__Y8j})=xsw*gCUcv;jYwwE*Nlp6N~K!Sy+7Yo zy09n8WGv8A%@dp#qg?>jKwNZ>fl(rj+DsBM(dXN5&nR4Bzf>==OVh<|xjiz88PZfSn4Y+lr06- zd9uzPw^}7B*;Fb)i`Fmiu8tTe>et};Q7D!okwFI;A9g9*QNl(;VtGxR@hN5XzB0GY zru4<^;;>I5wa~VlSqpe%1^_{zit}O0K{8JL*%j7c&}5y}P}NeABMfGZSEbO))#9cy zu~*Y+ZYd-kl->sABwj1Xak){gq-6kMWGyx8%ra@aNLH9n`Rn7kqT%XqUykNd$$VE^ zB1gz5mmM_&;cw zSE$xmjI1mZcdEcbfLimzYrJLP^qjTGNm2UZo#^Oa^Oex@>$_VwUZinSrLHyOc~o*) zt;FSVw9B;mniW#NT5k&xQhz@UD6pg&l5PBUQl<7vmNh_4vpOGhy9u0<>+stmM;r#|5|*Ff%VUP}%mon{=R zXqCZ8h~Pd{NT2C7Q>^RBE=ywF&Y1Me2VJ(n_)cf6Oi|YP%{uOnicv^Cdl@isK3Vn~ z#A2NVhAiMgL7@1H8UU6rAf{j-gQSKX1p(XGD1b{DJm#@VUz8hw6r#)}v5ZsRgSinh z$pSf%OalR;1cXsZqMTV`;j-d2ib4y^EX*J@vkU|w78uxQm&ImGd8r^*hXNgzt(agD zWYP4>4G?T_vpCitw_Wrflxo0}VF4sdOc1%Ulr7y%miuGR6YE31eK&i7vSW`JGU>7x z8w|RH&Q&pI?xnr6uUI&eak@uGySyet$hlo)O`e_jZmUW3@a@RH=FVXrD6 zMq39Lu1f&tB>&Zsk-Lz?qalHX>tGLn%$BNk9B_NFNJW2h@Va04z_<*tstR7yL_Bv5 zWwqV&Nu3IUx^&~&T&^plwW}EZtTyyzDl@X*Tw%-x)-JEz(;R;Jp20D#+m!FlWk6#V z1TW7E>c)TEM(kE_zF!=>>J-frtWFnS>Q$-QucR47s$G*$_E!xEJh(cP0=fD40&-0H z{dEREBDz>D`?f(ZT6@uf-;54@BJJ(zymFA$s#^*0PJm~8H92 zg#qcJmCpS*6FJL-IFd3ZoNB9JA2Wm9-3@uvm^9^43C-Vo=Fz0nZg4U*WcHQL<`MDU z6;7R?Ro8YM>S&9${b`6tzdI*krxdX_!y9)csy8-C*9vxi{$JMazP1UHa&ZEfQr$Pa zk8W}%9)0GPCY8!Vm)qSUZ#ItE1)JKOHd7>LSW^yxZ!}}J11Ae3ZRR{+Ij#e5+;wFy zm3;<;gf*xeO#qVV5?Gtc9R;ho8UipFSp`ED`*%19sE%^z<;BUpxPmPH#!FQ*HpKuq z6KMIup)mA)d6L5`Q0V-!gUhT3MF2|i+y+ZTnR**kc{mpjCwc0X!{nj+eOnFsr@|OC zSC-BTW4o$idX!*NX1U{o%MXtoxie!%7q~q9FRv}YDr+>zm2e$AeDV}*;Jyz(|7)tp zLqRsj;k(KpU{aU{t`u9qtMq9@EmmFpc&ts+f|ZVx!UklSPY78UDXQziB zJT927Kr-aRiuaNWlTZHwztVvn=Y3G^ZudDd_esYe82mDxX-H6%�p8T)6I% z^>y_~d->`=wRQ+VOUGn$JxX;>P_{i@d-dLg1_`>)m%umF86w=nKn-oHb?b7*^uhhRBX)L}F`s`GZESJe-AY@sE$F6WQ8am24L zyJ0w7@3E-ApG5y7ic753PVaz235#v5pmJ#JodrfCS`;RvQl7pMSXgSD4NhJY3x&v= z;(caYqyL}u5tcc8AT^X%uiJZ(^hIkOv1JWVZToC|{f-9vg8QEO^9>rCI^1Sd(H72} zb@=<${tMT<{;OLiPLEnDZIJawH|F;r3cwVJ^k2MBI#s9uNIp|r zzvI8>6Xy#gVk#>=U}xoNfTu7dN2)v(FO01kZ`)XRF3=bAS{=i2yoEI7Vj-{F;&8FN zPHi;>oUW@E{(7QsmE@-=(ME`Vovl)Iy4Xv;DZ9F<`AufbO`%5#qz}Y$9fptyldx3>5&UVh-o+zZEs&vVG-zmNeO*^& zTf%A#b)KFy=*DR(uml`x3dm=yqFuW5&aZceB16c1WjLNyqi(fB&}bk-E3dWrc{Oob zm$dN)Wgo=!-y!{w+rzvK=Et*vMdlJ<*toguB{T<}BZI-pa2y7JX60~b9gLeZlN$_o z2KV;TR0uJh(*qhGF_?`YO^U~0RD9=H5l1GYYeB@|DGq47jbU<2$ik4R_vSec&7LTw{ zf?O-OqLK4UlhShOx)-HWzr24_##K?Z@Yo(?7{0bpJJwu{{B8c^EWbZ*Sc%GW*H_h! zwdPT5IBdu`VbPvmS{|!A+*r4=r9kZcMN_$SB;Tfq&C6z_!=c6bifj!*>5|fi)g3~7 zE)-4{+O+4Nl8u349yldn{?8KTCzS?gM&gOLRXcD0}LKn+CsiGHzt->eQn9lrB3 z6nO%Tq4h6yUbBht1}g$Ia1HnfqN$pUIg^lWDOO{GjEM-voEa~`$+Hw30UHr6h(*|V zj;u%~92pPT8+Pvs22E*mMMrycYp^SjTT-(yJ?qe(wSb(gtFIktvgeF0*Oh;jc7FJK zY2&q_aDjq_hmkkuB{cA(-=jnK>D4MFi5|Y{0qMgVgZ*1}u3oTVn{)l@JCOg@)^$c= zLFd9de$ZOKQdg}uFij1)0Ikz|%?p}`Z+oPzafO~$l>?=mKjqE~MTSmnViOV1Yo#}pwdgZbQ&x&6Cr#-~tQ@xRP$Kr}iVrWmT z#>(+}C0r8nVMtF#yvalM z-^z0>46Guj0bQ9bPP-6ufuS`p@<5|5O~mq62+yfAOQ$Q~$r)4#+4vxv6^e5*);(nH zKpqXS2^I83aUeGjH^Bw|B2`P@pL#)xyaj#n;_SLQkLTKQ1NQwWJCe}U0DXeM@axqAtWjyktLFaBGZNUmDUNDplo^)E zGi(D*&~$-kS|~mP5Y|=_0(%+hD_%(^6-+x#(fuqzLmnMPmx8XK4CqS|RKb@S^%oF1 zTBZWi>ITwal5KQUM%gf-}*Bpi~JQ@Ksq76kUE z>4{~!{TwdAPbi}@r)@QOe_92^Pxc+0a_P?GI>j^Pl42b^dz7?R_S3lZ_=~-_ce5g^ z0*Y|^g9!xOm^3--o{<-&$CD0QwIe#}H&~5^LiM=Q38B{UL62T*Sv}>k9CejQ&}%U^6$h(UEnyyj7e@ zxd>(f# zt0En)6Z4%u!DJCctJ=gF^jmXAA8)q`YOls&S`x5W)-JZFa*2dEF*+nT9K!JIv&BR- zqfl8EoCPV>hBaDP!}*r-K2sJL6{3CqL@>kJNX03-l#uY290!|(K;;=OO9?c2woYcvYK`{6gRdU3lh=?1cA&i%vfezr2-cKlMU7?e&+J z`=>r>P%8oHt<-w7Ze1A*M&;+YpDKO^D5nb`s`KHpH-P4BvS_VBtinnFXP+5b*b&GQ3 z%pJgn$*jZYmP8cy)8psPzs%`&FoJLK1uB{qIx1t%l<9uywmTw?-Zsy%`xhXBS=2L2 zQ3&60QN&=d_r~X~L6_Zi?ni43VZ+roef3qe(qz$^95JUSic}uad1o*v2E|L3ZJq^C zbMP}rr{9^#wt=E@yMEr7-o!?h4JY>X)D-Nix08x&yScmmdk1tiXxZs~b|l6fu^K_2 zSn|;;FG>#`$QKrc&)HznLuB^aPf*Y6t43_Lc%I-Ebv7?u-rl(6fNKWdSjQY$)ASXdU!?Z+W@5EM7J;^q!(2**JYB{>F9z3(JQmE@2ao3$=!juFDrBS=-YL$0KoutU&^*|x zik*@v`qA^Sho_w4ndJ0Qs0_(SN;P2u#3vU+58!fmjFt{z3!pq4OYPuhurm%1H~B#T zy)ivlaU(?z@!~_t2VhPwHeYH-W(SnbnsDoYOe;n~g8fn+xw3q~mo`t|0GtdHa`EBh z8_So;(N+JeQQ3S5m&JAA2SN>K`3XwT*&T4=hyPdk%yVb9|DEmsh4fi0aHU1Or$*5L6}QdZupi>5Rz4g6ZpKFCbIWjNRbS;)28)!(yG%2af3 zqH5t%&_xFkVZzJ&6Q3#-+|U{D!^wo(n@ZjQYW`+J!$=|;+K)hJ0;>QK>GX({5Z<+)`aJdgnErt#Gq|@+~ zan~Rh$o(VkK@Y`?okjXJE`c60dHhW8If{EqJnokMg8LPmT4pF)DJ|oE#+;I;RDW4H zMCEh>C}*imjDHw7+8-!;3MSU~%l-+Rbyk%4?j-pgsW?MY{a+I7JJhoOmsFh5&UYw0 zisjP~95EvT#7zsayTyqQknN@E3^YkuRu@kOL;8$qusD+!rwu&s;Ty$q{PG#HGp{sF zmSnwOev;|QHB(6$Wq}_$qiIgh!7|zrE7S7S1JfKcUL6iqSBD~n)3>C&-el72O-b3b z*ON+lylE;9`=HZ!ljJwkN54wt^WjLoCdmA%t0L3U0r0G^mvmKBc63x$cCyESHn0j- zlT}|oQyq)c)I?&{?%riR$N#KWsgvgsKU{l!UansH(^fw( zPR)tUyL92${u-k(5zJr z{^a%n#S#1rg?zLzo%VRj%RMI_m7hH2l`7zI@xxO}{M?|WJ_|L$!x71$C-B2|bycEy z@T?VGot0mo1Mt*fRW^_*)P^Iq)iGku6&G(9L)A!AzqM}pPnM&8=|@+C(N6k>^z#W~ zr6o4Ex^KX|D4aVl}jwi>j_%Fc;kdQ-f`=%+k>MH5H zg0V1hOQ%7tJ-M13k2H5>7LE1oiYPUODgD65_l9U%r?OZlkDxgn7p$Qv?*$zlwv}n0S8@cwzo-$Onaq6GkC^KA^2ioqi|64r{_a!iVK&Qa&*;2oTK@7{O z5QMSdm~lwfr7bJ-QhRt4q$y_&`>Ty68fDI2zRVZ+*)43wrXT;&(d|L62L6yh%QGBLvF9s#weqgAJ{dYbF zRtC9TP(C1EU*<2HR`lUYp!CqO};SoY4-&#dxh2+w({Z3es3Y#zom;d zYr7f_{&Vl5JtkTqy>Acct;kr_c|k!PNY{B?X}!@jlB3jqpC?FGg&M1yy!Dp$kqb91 z1A;yctCP-Jqi2I1{zMYu6kUO(4UJK?F3hu}Es+QnMpKG*rB)8G8(Fce4(N(z_bh8^ z*_Bf=IJK_PG<2ZZ+vHy&3@!vL%`!i~kiS)#;v znq+e5Y6%R&zQnrX8aG^a04mqVa*l+e{*b zMdUR`zi21bB)N&S@Szs3HGtNz-1i>ne&1cCR_VfSPr*5U1zJA$=-H2Nw_4K%`}W1V z|9$mn7lG!q?9F%Aj&=CUN6t!fH0=*1%5w_B*IZ!LI>Kq%&_WT44K?Y*uH+ zDyc$q%BP?-tjVVpn+jUdY4f@0We^pJ0Fx5B7*gh_fWYa_O6|$Vd_e#W(HgDJW(?{> zT6e^fB54!fXAo;Wp+QLd(;d((ryPxTvq(ek3zF#b1Ps=xSCzC2Vv`|xqS0ZFn*~@G zxT!K*qCu?%)R1CsqT2M;WU#iypv(F4W~!ooqFNIh3c7=RqFtat@To<224~Yz(lj4e zhOBu5@74{N%Zz3HQ>VFinM|1-mfw{iM6QB_wEqLyo=qr@TEIHI5dn7qx&!?XmfZg# z97GuO1)OQu311XN9cAdq41L7mQw4MPxnL7Bk@1O_39!Z2d?vEq6X=h7)! zmy6@KL6NO05+uXWGvmp*SdN7P*q4g3s<@HC06`EjX9Nvwn2a7-3>9SpJ{A0D)6%PW z8vKSl(r~+_(-lU9uOZ6|86GbK4dLpTdt4RDL3aDB{jGhJGIFuC;G^JMW_JSCqer)!t4RBS&8_c-&$ z0A3{D_jDPU&A+oB{{OXtuL;D9ScGF~S44Wkt>edp%xd_1!fPz)L({j6MI^Qw$S)Ua zn&2zr6R-~y$QnzwK=?Goe6;*u52JYdlBNHHmLK^fZ>w}||AD9DGs!;QH$ojW-M{~e z`=viUJ#zZGaNR5Dr*~HWPzoF)SjRP-mf2WgR(EnjY$Jr1vJK@^hnb#!f>1PDNuq~U64f+KL``v{RdGGQ z+K^UFvF(H@J2u&@W6K$XSkc&YL$WudHb81s&LVK-^oj;SsmnwMI-7ih=)^)ntLGQ; zkk#WpCC~GXEZYcfAy}O>vFsz9TFt`WCSD2g@{JsXxkDbuCJx9Ho3XpTnNdSn!d#ZE zSMqAM4#Yqm1(Ed}Spmb9r|LMaj^b&7X#jpLx*4kR07;UIIi69{r=Jv63)P~iUZ@g@ zAf$BS=cu(>_2h5BWUX4j8w~uoQrW0f!8uJF*91+elpj%E5>f~P%E)En)eg1K&@iV~ zX&~*oXs5xbvYL(gVC4|EL7-EdmL{8y2f9~R(^Mm?HaIKXrdci7I`CpH=RSzLwQ7@v zej6?M*{vSw#DA`rzP`p1y%Pk0K7G^mk6f@_IwjTrF!S}%SCQeX1+4Ao2QI!*`t*6} zVNTONX6#Lm>(&X74xCcyAhohN+C2~Q9P}G%a~gPedeY4ZlVvLi6(PbqWrUQqVgayN z8-k)38SjhFLxRYwGvVxbgDs_pykPF=KrCu*>fFO z^aGHK0H-P85GyJchBK&{uVs{K?fRf{e}*I)QhW`|vyRnhYgcf5r%t^QT?*K;Gsh_YaURlRd@L5%G0;SA_L0`xYB5?IaT6&z%}&|s49?OLHr#|Le8jbLmvp4!3* zEZeU^bBFjj8%2hLj@j8^8lKyqOD%RJ1mX_8T>zAfg9na=y^_e?!8{6dC=NMTa(>xL z;8404B9?vtt^^5rKuN@^O&$ibLLu8Du@C@N2J3|;6E<1OhZv9nAl-_kP!)>>fPG90 zyt0|%O-47E!A}4y2v8pbvW*Zs3reyNbW73*b4NJLVA2wKB?k*G*%2w*P;uP0nZ$gt zVMrGQa5IosC|sibIirFa83u41a4XETu*F#HqI@@H!qeHq%nXKW6cyMoo{KPLbQlM~ z`GHmo^!M<0S~e;+cw4=GL&dVRsg}6bl(sq?c|K?s9GXCf*_k@|u|X8J|7roRCU<7|Db{&6JuWbgIY z?_z(W5&j17o5BY-cI(7fc#BY7ckrqMqZfSKYk#%sGUBjRT5a_<8qu;ku)CJpiWC^fBF1anEoY&2uo2_~k#ok|MTXt`RC6kpq zu23Lm&F;pzM`+HW?YoY;CeY%lt`5vf+mfdT^nyY)@ycSF)a#eMrI;-su{jJ$pIdr0 z&aeNVgmB{iTV`u?dZD#p8}&0~$fV_C4@u9|QiSOxL(mpww*Brr=>`--!VUSuuSj2XxnAdERwW%<`Q_8g)xf!`3K=SD zuYCC)Zt{204d)$hy>!!q(&;XzRwcAV6hBg0vxgQ%eY|ziooBfdNI?v5AGv7)^vZ^f zRiQRdMsebal`C#1fLL?Zg#%WWSsj`6AxkK7xz?LTsj1Gfh>al6yD&9Wi@1r~^Lz^s zC9J4f(n~V8>;zuHMc=%~{aj%H5D$$WNrY@ly#`v? z3}Lkbh6gOKVC{nOm@+020>#5&vN-n&G-x*fhA2+a5Uuo`=u14`%06sxTY^3%qkr?V zE+)4OfT5q0(igNzyW@{nzqC4x)-xfy=wYR|rFU4LXb%z;S|7ar)>~d&VHZub^aW)R z7hLhkANB-!6N7qDKW$Wn4u0^~lcM?5h- z1Yvy6j**6Wo8~CKM{ueKo_lJRRpdXW)TVv69=yVvjw##c*NyI&+dgk2b)jNvE3cXR z`|R-4Fk~!@7)0Xysf!^6xQ!7Q;=$qe_gV;~;fX6~m1xbrvWP+gKh;M(%0ud#RbN`g z4=4rtquNNQ%M0HxS9$>ZFa0#*s5OXHCq_~-sfnGvsPXDG~r0{IUSP*xFkAY8_0c5uO zO6laypG((3J+K?3Gf;7bE6DTFA3W5U2ImDG8v zhrYM<*4Y$~dK4-ZC&foHi8&+b+8*NC!K&1pp{lx0>4yW^%8^+WwOwf6S%qBfSsCef zb!TNV<2BN6wfoBb%Nof6?~;oAXz;uZR@y{*Q_jLXwVEI6n0$k!$2ywUlM0cA1lyIa z`ZEl=d=fA|tS9@}HbKu&%NJ(t|45=M)NZ^4tyc zS8m@w@b-?aOXhFiH;Ddv?ggXs&%3biq4mzeLWK_nh9-~DzRbH7;TX<0m!s~fa6x54VN?y$9xH4_osB}B`E9Q8a z2FSQyDtiyo-G2#Kq6De~B+(V7D9bAb7DIzOO_>|JPm#MUiyYLWTsVWV^;!7xv{^AQO!Z}K#UADgA z`+;`hY4I3_J1`mbW)r?Pycnohx;c4ZV_{SjAVK_yMR1a$=xq)clWyU*ikAu(&&J$(1<(Yr?6e%o%3 zXP2Y6fA65jZ$5wYrpmR8(mAf9fQb59IJ=>|x{Wu8%~=8!Iw)#&bwBt)H{0D?5~x3` z=aiO+YVNvo&Qn+*J5jE+L8&qsaH^p0ie#q%#LHEthL*tw!ANi2wQjZ$vGA%1BL`4E zV}&`|>_55W5NWYx=Z3VZ@I6KX9bCKEW3f8SHjDIc@PS&r29uTe!K_XBzO;WZ5gYJT zfmvzM+?txhH70?JMU!DxZ7LkDYwwe?YHgA*3K79bB(-XEr_~>}_FoS4YO~6v8ts80 zXJD_#WcR75+cOgb9cvT$`7Vpz%d6uqUcefD*z1L07@7daI=hNDMc_6@8Yz~obw^q~ zH9c(wTdm*Mna;*TW59GL|C7;Hr%zm_Z*iyV^S!3Q+k~aBHPNF457xeV#M%{MUXH`>#nM=UoAqS zhDAGWwVHtkT{PGl9af#$YS&K%&RzP0R*e=i`)S)B9o<#i)H}Q}VX;To&jp;%n>s;y zM9}F3G$81t?R?DPG2Ys4Gme>e=&dbxTHLv6uf=7dX7{)&nrmx9&05j6JZ1^mbmq&! zP^k+VlGaodFA%rVk@$Q_*c%VC~1j}r;?FNfG-&Sj0?Gy$U)VP~^XHDSS zK70180`Xb#DfRZ0-i9C32D1666a}Y11JGhEhOC%(0TI$OWv`X}R>ppcD2Qqy6nYk@ zH?s5raU=7Fz@&}^Ptm4>r3AL6LnJjwKe^r+fdkGr5^Q=Z_=Tkxc3W0L^l|A_kP$|v zB!RKK^H3#OjkVor;lShqIY4>Iaw|m;SaiV}46Y5L6JTd>0IR+_vyAC4=_ND4m^1Mp zGbE*dCiN_Y_JH;?{fI?dmoX#H5v3hjRV!NWusQI|-SM9vzIiyFiFNUM3_+6Jqt-krMteqXJ@8XpoZhcsiq~2GTWpZMOM3yPKsDEKyjur~<<_m)3?Uv}|)zxZ0|!GyVEU;d#c*zNH;f+G{l-lsuABA_htv8rrOgJ`esO&=~nYJ1G-l;r@R;J zBT9%RQm~?$XCSsm*<=JWh1QU^x)w3b0;C($U2D!+JPX+t*1+1d#%6}5hyy{JD-kez zQpn*oIAb2OD@@$+ik8SR4YJoO@B2DKD!=nsg zEw}Yv9mtnUU9VNzkAHaV=r4D4A!FL?>^+ihNL3k}n&E8z3MW~!w#M_u{`8WlwmWx` zNx=fys87Quuael=i4YKAoZL&iHTqi!s)8%>U?NetJER-7O7DBn<{eAM@87q0)dN$% zeE8B?8}8jb`|;r|O@;n#jTF~XQN5-+R=@J}zaR~6q-La&j%bZ7>;j*;!UC3=W)3nt z7`SG@dyCZ;Yq?3IVOxyaAF)amfLc^8qf*hLW(?AEyvA*SH^wRTO$>xL64wTDK65DN znLIm?by_@CcHl%hE)6o5F%JOQ@@Uyu*+u|(7DG@&F!eIi_HLX`hoOPv5^|Nmkb~tS z^y3iVv1Rm?dFIH);r?>|dOUjNj8hosB`XLegj&A1Bag*z7nRt-ec7s)uE}oiAR#9c zr2g@Ct~f}mrNGZ%&pB#krV>-cVFq%bE(*MDKX5S$F>#F@TGo=Bp({iv%P+PmTwu3RN*$+P+^o8-j3bSv#vtT6OdSU_3+p;zw6IKi@ zTyWP7mv5zjSLzs|W*_JAwGSQG(qZt{jl3w`xgD1B> zDt*>>{EgYyZ%3v@$tt2ZXm0s~M?da#ftAQ>l5%0wqHRi*qhjGOa0O06hKe||91?~7 z4$y=QsHd{pD9ayUB9Rp%EVznq2FLRPY8HEO!7hdufH2r)6E_h6w+2ocXV&S!w;{_) znY~Be0zn0$gJGscq2CW^Coo(A&bFiz$qE`HdkIPxg9t->j_hP87M0XEEO&7PP_F3O zlxb3mt-uNldV{RG;`=JzT~Stn0LO|Zehxp5V1ncDBmd2cup_JDGX*LAeMW7gY`4_? zK>EpFQc6gIz49pM3{9`cbeGflkq#=ZEsSS$wXC&36vg zgzEHSV<^}l>gz*!VBu~duEQEin_k#p)7b8}YU~>XeJd_P?=&W2jg7HHqZ82Dw3}9I zCmyIBzHM%DG%grECU%VkaG>T?^vM0fx5f20Lp zuYYL)xYNfbPwMsBq+;%TQP3Inq66}F@ikVf;1V^8TFZp=)sa8mZ8B?I6j_xqSQy4A zaFz%~D@-aiVG8<{CPJYBS6O5@dEd(l^dHWu%Jx!Ld!fItS^y+&f5v1{^k#B{0T*QE zbvlFn1-_zDdS5T>5e(Ahf@v>E_5H1lFe zw!8?#9ULsMkOv5s4Ol)wC!9ujL!X4it$(e6lWzUu@98O_`8`~Ezj zavPo1SbNTI*MGBEuS@PhU4Kh0PP<>2z5GdkU~wHJ?C$vLZNJUwatE9i;?}Uk;Sc&< z4)j;r49=QxpjsG>q9@jGPAyvl-6dj1Pv3kUuN_)@!sY9Kz|QEvWg{HP5dvXYJ;;8wR(2L$Q4JxnDdb?HIph z3wj2nLBc#Bea@Y)XfAUB8|gq9#$3qi4Tw%KXG6{XHO%UERCFin%9PKbl;ULa8 z2a7cr>49Jo(woQYLm0)3!A*eCMixpS!(piV&d4Y{sG%SXpQz-s!$HSz76=T`SMUeL z#wKnHO$_!+cfBlqe!)iRt*O`dqHGF%+;y}sF$j_Q5086&wbI9Y%?9bdx1~Q%Jw_pP z`(Jn5~q8!i}VnE!M= zy0mx6MR#2?)d~T|gTN4Up6Ly_FOb7;4fhYY#X)*ls=6- z+5oFEbF{%Wcp_wVb;bg}XcvJShwln>F+JpYoeOt|F?ZQDn_o0-=FG&FR z(>t-b<))~!=JHs+3W6^oDm>s1G(}0w4iHJ%V>}z&l(n#eiMQHXQw`ldTSbH>1dj(e zz%9l=14#scKB<0)ad}0cO(8*xe8-$H8}__i_4O&U08b+dW-7|5 zb1Tkmn!k_%x@0iB^IQUFS*9_{VZ zDOb`_nw}ab&Jh&m-T_`|)c7kkY8%m}Xph$_h8D()%n`^R9hzpV_3d8DjBN zeKxuM((7#=743<;t+Z&!#Y#Lj|Dw^ksUQ4##)M+6HQAiNPV;B#H@c@??B zWYXZfmMlu*SY0>-5$&8VYDyb5w)%L|Rs$~J;ra8Pvz2*}PP&sbGIG->%OXxD4S|NOXf|4aRs&>WCm6$ki4>BM%$TO9 zx5Icu%XYA02Les5IEh0if=$_E1=uVKqbGpX+w3YjzOW#`79<$<(2^M(WJum1VMh<< znM^or&WH7(u+fdJXW<~o>O2`8bI&w2gAzmIcI7*Sdx8#IIuEoZ`F7#)vc)UROfL<` zFs%eQ|7MN`8b;c5V;I2z=^3DL5;L7lw~PzGR?WcE%qRsHX7L5mMKl^DE%^36*lo^y z)4+INVSy>LxuMWE-fK461!*}fW7K`)wW;%#nHNCmKvUk?ws52Y83qgLir^*`x!=UA z){VVzePeBmtqX+psBw7F@6GcHh54pszDhN^Q5BhdO>vJU>sbojybhO}Uc4zTl}*t zx(e&0E=W-o7bOvxg3zoy+O};Fp=IcAVeSPoqwx9bZDU;c7`e<=4>Gz^Y_3_UnEV?_ zQRJ^swHSA=psY?Hg!e0m1xdG2qZi%D_YF~V^D5y09kAb%=@Y4ZUrqpkY-Pfm(f8#k zu`hFvJwP82RyCWW(%WiL>yIbh8^$KODrWn)r0Z&>iy)h?XvwDBg80zv-nT*{aj@3s ztaZM|rP7V%aW7mz?XrsQ(7ZrMOdYX4C4=n zv1{tvEe4AzoC^gj7C_brG65$n35RnKfd?j$qQY`U?SKIfDhZr`^$*#)g2j~|=+YRN z+pyUvj2Ep%g%e2U@Ei&Qx@eu3XHk4IZeJe5MJ)#VNugo+KnOgiuo0LBgbO4?hGDt{ zK&u;I(vR9JSs<-k^5Y*@aKd<^bJ_5_vk=P#GG5itu|{fMW$93>XRdHqTVRkT<*p%N9KoXRcK|{mga3a$r=k z<)Y?+TR(Fc4rH~q${*IKIz8aREp!k8eE~S_W1-IM`I|SCJgIT+EX^K}s%reibhR@>5gDTI`jM6Gnh*GY(vv*+I z*rCPoMN;jOLj#-+xW<%5!8~+PsU1?hcwxdjJ6`cH?TCP_Akn3_? zH7*?l#3(KY%&q72)s~?kClU;AR0-#qu!4K3(u_DYUebkz=;C$EJa|!q^;M1Q)q+T ztve=F^)(sV&npm!Q~XB-P`adO5rS38lq)2PVY9Oe!xem5VBzBwXsBH@)ajyyY(jKu zOmTxVXVB_G8mC}aDb>}w>YA#6A?neZfN4;v?=$G7k2r(UEF^O-FsQPWxz!5JYYoF_ zx?FZ^w@A3`b}<*M2S5$oVwgLXj5m=25A=aXh(u7kM3WD@Y8g_k76f#Bry8iu3ZmMo z7a(vRcy(ePPo~OsG~#nR$}1puDzZvfasrcvJlZ$OTIH)CoJ)$LAFtv|qOcTLjrXU8 z4i;J<1*Q!{W}Un$$14*Vvni``3R%EHwQpq-4(cwgT|g`pTMNPBu>_k1i6F}vXdL1A za0tO6W=e}-QFNB-KwC3I9;dHUYCn`^b(Fw&xA5ojR)*>k)J(F@Uu_OGg=vbl_^d4* z8E;^XTJO^cs)@F6M{4&q%{Q!=`2C2qK-aBSTJzJ@{nrjtOp8v8A!6Svzu$N21CMBMc+`3?QReYh=~%C`))&Z) zO!mj^@p(!`-}uMrpufr?r##}+gD9giq}=m2NG8HenN*Z{!MO^9VNEBbwpXX3;fivN zx;`FuxjQz%sErf0A6`@^1|x?Pwu|ts5JT2vs3?cS_3-4?lLb%0BNu)!+w9czoHzW z@O}@?)?|LyP#N>i3P9>7;oJ&Hgg94{*teK+0QFe`$~PPO%QVC&k6Z+4Dw9!pz%ve^ zP)rz-<>IeWX2gCA0MkuR&&BUt>=w`ufJOzD2l6Ze4C0F>86XhJy+D)8T_+xVc_n(} z_MVG6pO>DfC6ATI0*)2?U)h%s8?PDuAxeJz()qK1g>Df;)r70>tP>0@BzL8`i3U%b zN3R>C$-RU13y4FL=aKX?Rlu0#nE(7ZIdno=wy3cJwf=PWWzutp8VuU@buXM~H<#z7 zcb|^zpwYd+`xyxD#$=E@Z(nUtNJn_cVtU?S4ZMLnrSpIO9@>T4e>i2G66k z^oGH1H=s%h_-x=mUZX@$#+^TiI`6v7Z$-bb`VQ2E^qDu#zyHgd_wT;#t9#CQ8HqJ@ z(0Fw`R)_!AMy0DlJNx=}hCpN7^-U{xFC;DXm9+rF*YUE)%ie^5`zct``cW3Vg!9oj z+JmkEqhgV}8^jBkvx<}wdL6bd1|W|(a0VM;9Vr3{9|#7J8rZ~zbqf)ri?9Iy?c>+ zb-jueloozWdgsm8rN1vyy|m3l1B)+&pkHSXsZ~g)Pm?qP?jy(|gAB;R*^JCZkYYwC zH&Wf9h_l9N(~*>BTL+}y<1DbiAV|o>rv&e|(yCH&C!bu>nCvtdI+L+Bqp>X-!M`KT zi%#|G0(zi-cM35NL31Kx0#u9Zn4bW+A1F=$kLt@oIv>Jns2CuX05n8O0S-b!mV3Zu?lFu*yMroz7~%tvY1! z5R}_Z*jE(_V-CUtr9N9VT2|Z-IPh*Ez1Y30wU1)*q4;t1x`KMwD(0^Oy z5|0%kQzvvS!C(ti(^)(mRP$IYT4a=Nm~AFW%NUyTr{Td{fLh6M5+MH+t+9-}FlC+~ zUQnv1R3=hw@OlkuQq2)xnjPV=L)wHMJqmO_W)*meay2YA(at=%5rd6z56a^$g=WvNgo74i8Z zVUH)gy!n(Be7{IgJq%Q6eW))c2)|H)8b_KxDv48>X1_mRZh9G((pw8;O=(`^dPbCwlWg(33Zg)Q#TI+n_jq;hoZdPQ3F}Djd^W)7K?~p^GPPoIh0e zn}g+v^8AMG$E2^Gd0b^C=Sq9(l!F(Bf&BI7(u-gYMw(y#15IpO?#h{M?YUPdLtNX;?j#iB}&cAuq&ykB)Wo>4?ZZ^_gdi4j=RST4Q){_rh*L%}+eJ2C7-&Cj0NZEL@1*7sgfuW&a@;Cf>@oNUs#G<%m%+tnh{5e3P z=iv+EFM%a4RD|zebihJ5Er|W1Vu=7{f;VLi6(@r5d$4;J(wa38GocuY;yfdN>~9_1 zxeV@M&^dBg>-i4L$F$-Dhd`IY-#lLojVkI##ZH48#o9Oxmxn)LxJ@HWe7K)7l?EIg z0r!^WPTcG5hV*(owYoM)dn*ue=r8AQyu};y8ZX~KE>dxvp^(|JueWK|?IZg>-iJns zPL%u=8ub3WWrD6iUB<(+z{WobT9HK)4Jl*x8CR^)rQ-m3D}fn1^)9Vsksh}5pAM)!1Y z)LJ2xDX*~F>;?xBG<#EFifoQ8Kzbgyw|zT;aRX;-I{d-eD#z^E1W`WM3Aq62SEZ+F z_crfuw^;J~POMVTH7j?&zpJ&{;mwX_Zh6Wds#k(ViA;9Z54B%`ypN*@;L58GNxy#Z zwRj}wa8Kru&7^-XQ?n)19o+Ng?jh-8DIxv$UFi=kKW#%RZra?r{Mhoon^)Gk!;7+B z_&)h~WooCL~8V=1m_#1-5Q3_Kr-65TZI!V2}7-OK#QqalZvl7rh0$vY>Tj1`>0L@;p@6iyZ0v{dg96C%<2#X{5+VWHmHQ|4v zq&h%94$CSJr0iiIdsX^_wDkq)CTYpCiKt$~u=8|~4FXmbXaW=}h{p_=i0I0~+_O)c z2$!R2OFcmlOa~y!t?9j~MTPe+?K{|Cv$Y?sls>zCwsf+c zdiP&H-6{QJ^-nvkD$Vz9L;q}z?S1;-Rv=;5G)87iA4(5D75Slq{g$r&gnMfmRv7=4wsV!t1UQ&i z0+8$~z{x*Y_B61My$}9CH%g;s2#=?{K+gv+uqdJBpmOZFkfHLfBH`(@sZ7?w81OGp z@>&CrouJly5qVHibb`4}E^@(U(_*hrSx@+Gi%51lyy-nzmFAHxh6$HDC2Po=St z0pnvy%ojfjoSMa76P7}Fc7PVlV00w&;9Y~d^0x=)0<0InwpLWxszUPTmxTeod-%Xb zDUfNPK?=Y@*~%zO1S}Nbmf&g5+$i?208d)!ao~}_BVRnfC_5Aiu5xNvIwCzJ(ZA~1 z6HcW;gECdOBp*T##D<1DZoXmbDs$Q@Shv5tD%tI*ao9b!>PGb1pIU;^@Fh#6D*-_6 zYxWyMf?+Xu76ZevMb+ybD)&6_tG1g@Zut6gSNX$pPUQf-9_TXJ@^+vK);oo&$231N z&1n#PLPKD5U+uhyq^Ifs+8fIu<*AzsWvHp^^_fC_3t6?f#m={Ett8%C0O)qw626TT z_FNdqZf((kse)5<)}7Z#a4nf=J+Q>J9-R|Ws#i7w(uL$8n2`*%)ioqUnu0W(c^QsAE@Dhd~Z#&L22YT<8Rf!+26FaZk1u> zUyjuiYLd#tl!{m-9q;z_rVBRjXdR?mpJ)o44SqfAR*S`0H``;2y;=<#8X59w)X4&` zqIO8{rZr@bCMarc>MG}~@mzWTgY|-62YmB7{YXOeNnbhqwbj;2J1^2`0F+*WP@cNL z5jN--9z!iQ@y8F2K%`#y+;m4^#4KrfaN4CRI|Ck1<7dN18XYV|fu~#Jf(Y(%$Sy?) z^{cMidxQtoj1!~_)CxgCYa!Q?;B3BhK+vOGKmWc8;P{Lm2 zH}QwV4|quw>@y{)v;7OkZ1z1zF%U6*zDu6u9b-fdh7 z28;n4V!8oS0)${nNGJh9?<53%fsh0eY63|pwl#j=JF{yH|NP#2pU?Ze{j8)Zcdj&Z z&pqedQ@+O<)H(8q+Nj|R%Fh(PLt(u}q;Ri;Msb2{Vx}!>}YqnIv%N2@(*2NE}6kCrBOtq(L5MuxR5( z<|{*Hh#+0XGvu%^lSi>SN8%@iRxu7;i0#Cfdt50T8|juDL@wLN!5=sLuRRJGRsT!N zAAB`j8E8gsi~PP3?T=xgKaQkqAsK<(Ah`$=ni4et;m&e7(q7zzf-r*N_J5wjNU#Mh zYtu&fwfrD|J^$H59DSXEGddZWZ^@0%zWe6mumADj-PiJ;Z<6{w9BKvdFY(3$1=N`R z1?6B7Qt4Oir2PB&v{J2~5-2i3%F(hCxmK%ibi|x7BdgfYR4J9ZSC}Lsz?HN)9%s@35!(q@fO$JShS&rO%i@2W4> zRTVkMmg?M2PHj}~rfzzV&+>oet-nOYG>b{yyQheLfyrM@c}olcp88LM3;~*=rf_JFkMl?K0%E1}V9W)TdzF zauuWSBOav^8}a~y$E9TCVN`4W00pGg9MEJ!3RcMiJdG!@6bv0v$V$+QEY8TO%NSOv za4D27^68N=sKE>f;PD@JnFuNx5Un-9ad(`&J)+{HG8vmzU;B0hHK$5wruEU;P5h)V z*tvsqdN+lvCa;5Y_;yhdVaw{fB49Cu$T^9Y`W3&1UCdl4NkJiBEm;etd>1O69Fkls zxmj{T@&I6%N`8OX9MjD%5=q>_&ij~AvY#90OjJSZ_tA{ym@Hz(OJ z139}uGs0WBv={fI8l2;nhAK+9uBtdr$tij!x}L}(MSDJO3>66P2q z(ilMv*yr-+4j|A0!3q><1ujFRHHaAv^I3KfWyooQ<6}^to=hvW?`TbI2;d%Oy;1h- z)7vy^I!Igf)GURDmcB=^9G}09A68LP*5IOs_hT{uw1WSL|F;QCRGFnqYPp=#vNBCQ z7wSITEu-(StTN1FPEWr0;$3d|gZP7bN_peq;^C5;661>K^*d8@B8T_$2a3jojwQPS z(g_D*;Rf@?-46aS(-Ons$hau=V5lK<r)K_v2g;C^Gx4|N%Pn}H2_mypgZfk)BWp=R? z$fS{u@Z|AI)cQaIrT_Ce~_)A+w@smSS}za>AB z?(8U?m6%pKH{H=)G@+#9+A~e@Y2}gTA(wPuT)@}lX=<_-mYKFW!l{AO=FmVQaueNP zP`PX+I#UhU%Wq12v5q)vz^ZRN|01$rKL-3z3)JbYAg+_}J-;RS8*GSXkQcG)jReHn zOvIN!Y=K0EU^u}!A%RRL!i~MOs84}pa2zW;V)rva1B8b;N)Cd!2*shPAXl)7a$Dyj zrr;LM6D2VgB36P7X$f3TXj~lG$I4OxqQb73(2&5wM_sZ4jgE5ld>(W~N{8SpfpCj& zBnAM|8#%-T@lzl^@%u&9A9n#=bu{Nduzix>C$taaeN@nlT#s8)gn@3MuUu7i^s(t1 zTR-`E4}aecvrE#PrRT3>*Zy&1ZuZc}yVrgA;B5Y7lo{2q!RJ@6dtkRhrBu9q??vw+ z>(TbolBw4$r_?ITzFQ~Pu63g7J#b3SId5Ha`D#`QHHe|mO}MvlB@1;sLQsZ24}$|xx*J*la|Sj@@BE#XYM;w`h=^VH7L zpT!?Xf5uFm{V)mIe7ZuP+#{FqaL=qkfmb2N2ySp*&t^bR)N&I(O^C~A!jV5oE$AEjyMl?V$R1EAXElCV` zlIw;eZ26d2qOccqg54wK<(d_ zAEUVTWO4iJt=G-lPR;(uU$#2*UR`yzArn-~XgNLDQdz3^7&#QLC|rur0Lq>Yg_W!s zCevtJwmcciEDfqWMVi6VQpYj z-)o`~vLhiaMGGkSQwWGl7MMd}J&nddBq1qs4qzpdX4${n?TkI^k$HnMdky#SJN$~X z{8@h7zyE? zpB-~WOD4a*;Zy!El=IVfsdR0Q&e_$={$#X-?b*;3aj(LZwlmI(fGp&Zx$V+_@Xvn5 zFa7BE)Yh-yZn*8^Skz)EU%p~lI@TUEC=@0wMO7ynqs{8j7t*WCeiwyRmM(my0eBc| zQPW{vv8gJ-C;6Sf=a2su5LN?J8LjDAR~*S$xH}UwN-n(aXPed^U()-`(tq*yLDQ6d zP4x!+4R$;K?kgYhUsK-SO}o9BPRRL!S>`;Hap(b|0-vMum-2ZMyQCf|JKHhepA*0| zO;iYE0K+DHix6=TZU8WW-b4)1L?H?g8N_iRII&=?2tbD-OC+>_-2k#8Pi2Jop?t=A zzMRWr+?fo(9s)5~>g8G38?6?fGz76m{GGzQC3!w#6cIEua)e_@02Eq~VmFxpVJVa_ z#ZmVj4zy@2!`YyKjdGy8_$9c64&hgjD>I_u3%`(1obVfRCP5B{5EZ7Q<$@VD577$V zM%V|ji{f9{$1g=iFQ|!-;OCksf7vjrX~B2(1NA;bOKWLWOLaNp=^NA7aOOdel|KuX zF8xd@HD^lrC6qUzVLcr6iAwE5SvN*e&>sN&YfGrfi*&0bDm2?X%F0BQxj_4?99h21 zAM*CgzkEiI@@h2wX{w6O|E6kV{3}%X_>#%gEgD8C zZRFhc7U@z-r|Xc?Qg@}>UJNWEgI{Y;7iILnp|+PqsmS&}^!tN^LimeuFKv^Cu z)BRLh@73s(O1?8+(VdYVdPb*4th*kCaT4R)ZcpJ>o!ZPY8f!@JO4@=E#T1HOVgm53 z&a3xTnss#Ze@>LzQ?_}OnR%u$d&&@}BDY%GIu_QL)_%Oy zV{;;%z-cs5ugKGp;zjAO(*ghiuo`J3zbb#hM-Ta_BL1bXO>LXY-H4brw!A%H`qc;H z0HS>Nd;C>;Zt@own`)M(iW5nnv?vkg0Lp;G!eVYZf{U@x5fnFa zi9(^c@rzrIfAPifTfQjFxwGtvFBk8A^$O~`Fu|E`suCoO^c?P`469nJiPMI8&3@`dG(6b4@MrRyqDeet!B)) zM|Ta~5dMxYv*{?8rpW4o3;a`kbG1TJysK~5qZ8ZcuTOoXEOP1etjx?)cE3?;qigsH z7P-8sZKiG9l)()bf834h4@qykCMI z4knd@DVxBS!iP`NP{@2IQyFtTEM&V$(sH=a!O(R2uQ$r zXM}#v4-qe&z+yVljWL`!{rb%_HGBE+BM2=&rPL_V!}^@F*Eg?wL7H=xT0$23-}uGm{XhSo+KqHceEoazoY#9V>8MBbb6JDfQm zf2SWFX=i2l%%PC0bTI0y=wIiuCC3K1WE*vT&{o_Nv?qF~FYmeN_|;u$@9er8);Cm9 z?2bpCJo2wuhxj)OXSY(zJFcXQ*De{aT0eI8^*8WO4xRq|=f1BlFV`EYJz!C4O^G|Y zE?QONaV(m;En-2^5o#Sd!t~9@`VVdN0taE@^%rfbty!yY4%T-p8FyRRq`eo=@2{9& z)3acFW8s}pN80D@ijN7jmiT<#@mMe#?<@ACJE@LfG%+^iPImK)+<|a&lq;&^-|_}y z?MW`#NR|6KN?rD@*p03_*xnn0P4=b)XKRW7Z7mK3hcikK&J-C~O>SY_3Px}T$J`m#lJfYKXCrZe9iwYiJOu%7L+5Ng_% zC_CN(?ycHb8&CE*lXA$p^<%-*Bx(pE%ZB|K&l?QG2N+bOya{6}EUUi{uEv8Vtc5Bs&-*22jF2fm z8jdB5y0Y2rUtRER(JiR#Z*-I}r{BsX9A@c$TfNPqar?`bZQn<+xxy93K-uX_00i{d z)Q7)$`>K}WP9T(N_y||a0=(w!9gdZ~7d4zwJ$Kd42l{{2z2-T&t+nkRg>M=Qo>wmz zLY7h7*&U0PJ7k~xtWs8G2q#NsSf&rG4~|i)Ewj_*JKGkW=zjgiOCC3HI*Za&SWfYG z@DEmecpYWl!`*)M+TMNRZdid>y~tS)65!uM#FZfWU~WcP6hQ=n!b#92o(BkBq&p?u zP}H+&`Ln-%Dac>-$o>DKx?W!Jcqb)Qq+HXfWq*jf5Aw}0_z6aDDd27iwTCcR z!tcUi8)rUuR-6H3KSR>eNgJWRk><+5GP}RV;Y)1?5Tac1-16Q_ez`dWs){)w9qxJD z+je?a+}(J=rR@>Lb4$k`IJrJK5Gn~c0kVJMK~?jqlWW0TXzeoT9Y}%nM@l{J6vOD~SS%3qecA(z4eQC z9O?Oz|Fq^PrTg-wZ^OR0QK|2eRTih&mW-2C_g3oc1J#Q8MRUfq?CW!x8LhExy4`uY zy~yJ!BClDAgwK;qdZ?-x=L$!hj>x94-4TvCxd1=@zD;}Xxohjbd*oX*A)7CtvH9d~ ztK8HP5BfdpYCE_}{@LMq&-1r*9oR4-ff_-oX;dQD|MRYmH(t6rKBw-Xo_O^Rt+9Np zaiZOk>sOWN1D5>@AVf;uJ6Q#A4(JeO%-xKgM4~-x3Hpo#BXs z|2i0Sy8L0TzjdIsvVB}DDE%m3qkIrh=vyVE$^ijACrBj>FwlfGl3->bg#(CZiHH=U z_aKc$964fL0mr8Vqn$7$!kFQtXzB*r!HOy-kUAMm;VPo)K+FgyMw^p~i&91-Qw-gb zezUnBA5ab^vsLl_*@42xHh(@6bk%Y`+m!$DFa7Gahxwb|KlDp#@Gr?{R{ElgQ^|xb zVWwBXLyVfVoI&rfX&sabjvBK~hUg1qUTW3sWE#DMvEF=3Y|g*?*TF(qI&zk zlNi?33ccFsG@G#dcsoR%x%o8ou#B?Wb1{umi+Gpn^W zSKcEXf6GPH6E1WajbZ+e(!5?LQ<^D_!JineK2c2%pPcCeLQ|dGuhgM1PjSiIN=4SM zG1fC)6iKc((b876#tzIS+iX-TuBx`0x5@DMwJvR_uTn`*rkx3{)Mm4|q>4%7HnM7W zQ&`_SCh%Q{-0LdWxm5^dwki&7duwsw+slIitE$fHoiKBvUu*E``LE~R(9i3OQdGcG z=^q-=Vy&#KedS=OM2t=|+fF6j&vTYPE@&;@PgfGMhVFuj0CS8xW4Il|dEKBS0^hWkn)4P$RV6aYKIj$(&;8I%nG7T#){K6`$Le0Ky{4V7QCJvkM z#qhY@0k_C&k`E;RK*S6vBuzXVgi`Qv?wyq768Aw(f?N>{#%M!KifAUP!9>!)IRXJ9 z@Rkw!6&ctFEssWhDaBP%^5$h<38$p4#5$0zT_}(bDBwjxpz!mtm@0}UM zR&~yfj46|ij`Tu)Bl#xo`(AOC^2D^Px6b9XaWmJnM{)i(U7Wo-RCabiMfwZ;Z%b-CipF+bzK+tfSp9!k0WyZLv%oe&LZ= z*&ej$i`5zgE|^uYp@lgugTlLN{;EhS`%nnpu&!Y1(&AF1%I5JHxIpv7L*w|r@im?M z{*pFYT+N~8ZE}PwsF;JdZ)&OESV`T3+_N^P#gx#xWHhNz=rAKiRjW|xH9Av8s&=>} zd1{@(VKV#c;yR5jv(M*ocpcW}aP95Xa*xyKb^M|vV)g{r#T7MXx5e$W8`MbjVy$|M zy+N+xZ>&Gi(SGsWn{`!No1zU?@Npp8rlZ-e=DIX>_UxjQ`>Q6k#pX6?oCcekQ!yIP z<>_dEN+N0k5f=tGLNvo(#E&bHQ)Y)z3BWd5t*O{vRJo#|s6C}Wb4AE$@%rJ&o|X1l zW92*EDfL*usMB{fMr%2jA!rFL9Ztg5wwk|0d6F4GqDM$lEt!NUfI9@pPKpXrX^AAl zS!6N>QcN7qh={E1qlwy7AygC;(lUhgNesSDl4hGRBlHF0R0y6S7h?b!KxYeQDX~#L zhw&D3I*7C;2^fNBLjXROpeYv6M8@X9!A3V31)(@{umVL0M>cIX!Mj07RZzMJ<%*OM zBMec*<{*Z4WWx%f+yYY+r8nq^SltD?6!?;viHSx5d_C-iF?4{V;%$iCgHpwVgcpup z$=EpM0~G%)|Lqc$Q?u^O>p2Chq6$xpt$BAE%K`yKs=i{=#my{5sc9RfeujU4PTQI$ zs(Ek4g_QqNc9IOS**a9HRPlfOCOowyF&Inc)xCGt?x4EQ{F&+6ms>{JH_}MlkXk6_ z5q@QTZUrkf@=wowL#dbY2(JzOnbF8s-jo}xx^bCFVNws>)zO>1c&bICXq*!pYtQ+# z3VYJh%T;)MrBZdQ^n#hqi(5(=h1QmII)X`e|5FPtd}wNSu65v^EekIDGgLy2+Yq&+ zmR9b%y7tUBp;#gwNtIV;ph?n<%qC;#wm{h;PuOJCDom=@nNE)_$vKL4&&nL$#u&!b zschOr(SI1r#tMa_)-ElX8Dr&7XxMR;N=1W~)pQlRRjvkU1Ikj*lk;mYKqBLWJ zrS96!E#5{LdQw2I(Zquk+|%(z&(q)REeYn^zs4R9{w4 z(Tds4gurDGU*7nLQkR;QG#W(ha>on>#dZ zQP;G0s3IveXt&PE(oEEG>iyEXo%Y(GJqffM_#yTUWM*YJVAI_?;ld-iHR_P1Wg;yt zuLAJQ3;YZ042b&%n6)>L1QwH6hpd6b6bs~pc|a%?vxgAzXtKCh38J=QnPD>7tbhoT z5T%tyiaMMqjKGV9wI_&kbe~9nPDZE|%X!iU^Aef@JEIODRvZ6y4gbLvxmYyi_oxBn zcUetwb;Gp!!Ci9cEuqM4r$cVH*n_Fcut7=XE?9A@c-oUx+Z$I|8>eW4Mxz_ut)r!?pB=qiZUrsT^$Q?-#GWzg#2b|MX?wEq}kLp1*|u)5RL2)zY@< zh+YY9c7!icUdCLG49G?yl6wwfoGuWMTDBqj@sQ*yC2Au*(Rta)j#< z$p&tMv4rd*LxoXRi1`3HLEs_sDJWuR#Ta2>AbK$X3&WxF7!w0P3x}6r)kFtm36ezs z6t<)|P23^mw~(vIz8tRzE+uP@JQCo!d>)^8ffVEu$M$lBm z4Tv%1EAYMK_VB}Sz0et$v}kq0H=u(&A`z6A@tu6jOWXew-8QdcVk|x*+rN1)($!Io zBW}0kDe4KT_m#D%Bk%fY;mufDZ7;SD9x2Z@7z6d8?&GI-)+E>GT9VcGSEq{x5Fsbe zFQJQMbWr|zwAsCLeQ(j)ma2--vgS~IVO6bn$vSG!=G9y7TDfV(pH{73ea6=1vy{0? z>g5J!F+E}BCQpN3y?N;t`tpb|u(kuPxX6yKhc>L(Ks~!=)rE!7rX`!+=So}~ms8U= zEMK>B#hMir%NAQooQ2;k+OULP@2>H!T2k2Dw8$^lW06Y9^~3bbODzQoz7%!Pc_g>x+C=IA#({C$ z&pdj`!~;7g9+=QIVc&!^*Ht=f+>)^dcSn@2ny}XvF;G?e`gfn6A(dBmDA`P#?6N)m zyUn}D?b?0Oz#jgXv)sOK0=0fu|88~v&T*B!Yn`RmS9`bjQQJ&D-Nv3XX>VT|F>MYF z!r!9OhkLh*#0jyN$n9!p|E@e1l5jS-k=NBNNQQYHbG*eTfy?4yJEtCs%Vkh73BLvS z(TI&j0Kg)1Aj*MSF-NU35fh6T0ffmY%#i=u->9{QKq(0mlBk?S=;w0)=Yn1ynGjnB zb=GK*oXpyOu>Tu5$8J#j>N!Id$7m)A{phK&mO zT}o?fl#y3^4wW5?2OSn9_nlV7CPva}8@JrxFaSVvMRZamg3`^r({4f6ONaS0I$0o_OlbH4D*=eSpa7K zP4cDW6quz90|=ZL=v74XqD5h5BRmR7qhNwLQLKS=Kz!LG;GQHG80j+6a3BcFJ|bR7 z3=5cNSaikaA@c~QK_569jY`QIm=p{y5Q!0SkL=(qhMF5WjX(pj84f|MM1Mr8qK!g< zC2PJMHC|-E?j7ltw7@@3*lgr$g)bw9030F2CayoiICMndRs{PI_@#{l6``vL z-f;!kETdn2%=z0pe(`|XGxO#}A3e%iHM_5*2LF{CubDhA-+JILlV4kV_%Y_s)a`{s z@T4E6maX{dgY@IPkN?V(q~-F!U#LI-%h+5p?<@Yc^yTl?O6N_dmbZD@qrtXdup<)d zUS)F!I>MeRPt0s^v{18+X4F*aX|*7ZyUAHtA8hwjRwny=&a^b>4(hNR8c1?!j=yi> zzJv@V6LR~y z^XPTPD3&grI`}oDvbaHOMJ$yYz7O9fk7`<_5nQ|E@JxpyQym2Ur>$|<|#v|Z1=(Al9lN?r3J%g?88^BeT+p(*_9 z_g(+Q1Hixf!ymgZs=E4<*Umhje0bBrH=Kcc`D6U-$h?NO8>jUiY#h6;(B0BJe4;yZ zLG#Ldm0Yznooc=?v-h^9K#;iN$9G@RxoOs|d#tyH;@eOWWpACNKIPra{$;=iQ5b6r~YcVoz+j&jW?5FI^riq_)9jX^J`fPIStrvu+~{^QVs_=1 zrx2v;FrWa)pw>ufna`yZ|CUTf*~&J?475_q-B?A-Rn;_#W}yH9@)iQsHtG^sK~2|Bds8B0S zRV%Y{PXLRW$^3X=job+L?IGfgmk?(>Eb@fmf;>24cgF&oEKER;#2Yf=P?QKxFhK`K zUK}bJ5%R-Dq%m32&}eJ{vjy%c);t1fi7TDaR-iRvh5G&~VJe6Vb{P>i=m|zGk_`ab z9a-eF&+^Z_{V0DSZ~3yw@Y?F;3oF|WO#sMrxudM4nWCrUDrzXh$^0)5Uj6$_>Ba|F z_mq?`?#oS8%tvj$gTJLqOQwgZg*Wg^+e;>eG79C2-ja#+W^J@sI~>2Pj}zF#r2UC5iq)1@(?g*68G6rVOm4Rine=PH)# z#P|_6r}*qe0wCTrjEQ)RP=r-E^Pe|OJ(RZOc64exvnzWpTp6^q#V_8JYuMT1v8J6R znM=lJYdidzizZBVg)Yiui>Qu9xcwwgEqK~wF*A=}a`o%+_7#Jh zUU}-n%Igp&lk2>Yn)~EHvbj;4FYntsHN9c;uEg~CgrL_AEot0@`dc5(CET_W?`2Os zbw$mVnXO*O*5*WQUB#pt(DEF99Q!o0Leh^I?W=_a`iS3M5=j$KbwZn<#L+t=ilU7> zZ=WH2Rn#Lb3MipadO)F23dntnoSK;3!K%*@F40%TnQ{I>g@`H;hWinCnBcFl1PEFP z$3A@vf7N&Vz*FzMgM~vAH*mNuqT^Oweb0pY)L3c5zIR;Q^V@#%Mpa4Oo`hUwZYis7 zoy!07SbHgDluI*9ayQ(uarMmF9W?;x4M%5IH7|1M#>lO#T4nG?daEkdhfF;G@Ew<) zq;7tansC{f&kby#IWVTtV^#m+sb`1QFPy!as#xQQZhx+2&Rv`#s;;R2eEyi(mBlDp zX~l9G#ae|PLn(M=agHR zL1?0TuyB70$N}GorMM(8NkYzvk5-U=8kB~Nq0*o$9q#PM{lQ5cu8R0yvIT_*;5!n!FDHQ-| z5R!`+{V0BxLWn#(M_N!QL1Ut97mI8VQp20Vby7MJnvf=5{fNK6CYbd~->f`(Y+v0h01Qy0?r^t!{?rC-Q zWf})rDWg=`F1!8F1MwE$1giJ!S*mS{rzvpR$t4?yjcKc#fJ#`Bj&NvfzTRA$QHNa| zr_bKAmtU4W^3i=ye0C&Ick6SH0duYjfVP?FxXKHfs%!e1HXS`UJ$44kA#_k_x2X&7 zY0aloRmQRTzEIek;UIkEj&l3(#+|>&>S4r=$*laxzx^%$F=c!9S<04zKx7QXOV)b; zkT)2TE1Hnfaogy@O#bQIpe= z(ixp8Pg$?a*MCWaH59UPQKO?id-<}Ld#2^<=Ji!f07mw8y)FGMG6U_hAuW+caWKC> zS(UHr+HuJfZ*Do|)>#eaqwpW;=&vjHc17Ji<(`tTwZL##CU>9_q)v@m> zUW6?_D!GCr9I!M7OtEpM^ zFg5pPO0~W96yI@mFIXrN;C}U2je=IPxu0AaE~N`A_+x9RWlvKxNSWe};j_1#`i2F5SKo90;nX^LRC0R9o&Z5H=aMk_sYzm0wcVP*> zOmZCSgnWqPiFg$wu~l%`5>tmDV89H-;TYvhL~ao7CInFn5}CBb$(a^=Do3Z)uTTfYR(-GQ)Mm9Rhv5c`A>U_ z;^X4|@x;7}DRY`$ynNZNPK(LfbH&mz?ei*Xr!a|aW2zsm8UNYkz5LfbJ$TJJ6|m@o z2Coe+~?{xnns(Gm4g+@yXbrU)+UY^=jWMXS-v$>7ky2T?AXu7`p%bxjE#jN&=y+vI~uSveoP_uWS_2A{de*2=pV=)`2X+%i7NR8Hl#+nuek;gk*K zk{Mtxv3dpDG2N{)>44jK)7frzx8fRHQS_ zi`l`_(%|f0NPq8`5xFJHDp37S$4Ua@u35aTL}$!_-{_ zgO8sOcGCMDbtdXPlj+Ri8hen+1i8N4nxOr0F1QbA)32N@XD2gRiCMxSB-sc1SPYB$ z5`-q_G0Y9fbQNODjDb<{ZVeEy6DkCd|B=`Kn{DRt+UbQy%0v9!q4L5bQ|l<^cfubv zcl7g3;g6ax?lalJ#%~Y58_Z;a?=njpCZE3etb{&-*EKv}c=i8z8+mJ&pDkmrLZG8x zQiKG70pzYN86{iBh?@|#6>N=?EzyOzkpx$3Ac6*jb_3>-mI6Hs>KEu*(08DI37=w5 zTIq!qSM*hM@@t%7BAS#K`eN3n8)X{^7Hl}QV$sDqI#`d&@{1PS*q1iCD=Ux~ld z*W`P9Dj#|%-`ksi2uKB1t1h6mnuV_MX0~d7OKQdd2F|Q2q$_*SN}{2f&t}*Une>l_ ze~Dzph@a`d5&^keLJY|I!b}0w;0Qs;fAmS7AW`jv*+HHugtG8#35WPEPxQzAuq{o) zh5C`_SU=oEMROHHO;n+2s3LdzW;Z+UW?c(Y&^Z&|&C zlD>dHYJj}U8DPe}{8qH0BKnrRylFU*rcy1lX0`Bdq!Wcs{w#g;EPQY0f6)IL>2Rd$ z3esy%Yqe@~P=xn6n9M$w@T0Tr zJhH@t6(cLfKz;J?@a#AXY|`LR!~&Mf0;F6#E|%{Yk(^=!fi|%+z!L^jgEbe*3iKJV zJsA#B%wjZ1?iz*y{f#I<#8yG}&n*JT4*DTY7!0lzFEU8~?R9>NQw-lCWG4`Tj6TW0 zkCZA1;g@Ceg5$$SvrPTe!UZGT;esg*)Lf$)t^tHpvy)rP;d!H7>9Prv=+BUn~g+_pX1j=Xj6%|=j>bTEkf{#Y$wCRhiURQI=WfuxsGDI?eDCB}2~tAwFc`Q>>%Gu7RVrHq4_zICX+>ELt;OK zk_ByKq~HIx|NnRU(fb&^FAB3oF!7BVUKlbjzWsPggY+MEm(M9z8Jw|5R1Mq}xy$MF z(8`QG>ei^8N|=r5%CsAR$WrRe317HMoi8=zf)16`fXc9xDpOZc4|mLFx23R|s0H*z z7I(^DIO$K7cxgD}E^Zzd478anQJ1UD0*Hi2IOvudIltGw(X6W?WkRb6+xtJaUi)9q z2(4emX|36;6)*fRTYN;QLHe%FbozRw1pxJcKnAd@Nh3}qGJy&|^T$g2*KK9w=B7{_ z(0A&|_a<7L&bZMdzXp(znXH$X=4tQM#4bboct)WQLteWDo#qlc4{gcQ~i-5M!YF7%5XmxnP!onlVS`a!xv-%5xb%c5XG-JO-6V>S?iM7WB!r! zX1(@c;i1wI)h%eJU)6ey29K6EzlA?)E_vT&jKzv5+LQ2wXf4~IM#gV2j0as53t3gh zl)?CCu`SW@6aj6O>$~aigv!f9?seKThPx1T{;s?E}E#WM(nWJN( z^D}{Bt&K%!p%F+^J&72n(Io~`X0^J=8OX;}8in%A@gH0)QR7j%gh%O+cmNd^ffksO zlu5>+^5kU6;s2Y5`F}jiQFTTL1pWc*!b2r|3spi!9~$9R;BfN~Qu`#Rt8#xg{oQaGbZo^f z?kXL12;-pp(J+jRhV;UXW|Kvwm-)QLP18Ge?4Fd(_I9kuo{5i))-k0*ql#z3&LXwC zrqyWEX}EY%kIsf#su~k((-jA@^8%H6v)NYUOtc{p>5j7+gjO64-iEoOiglv765&d3s@04c8;_~^#l)i#Oy=F0AcCq3ckUt zyj&sf3LYGc&*(IOdptFd*WeJ=B8DAk(RG3YpM<8^_Knfy= ztS`mMq}=*+XGwQi`x&;gz`wtNo46IQ3l^%KYTD8}cFS4*@6P!R(yIqHDNg;q@Z9_j zo2GYO>i2SeK7G(_wK0bSnI>DYBe*pY8nC;mJq@%gQ=Tx-@VPjJR>7II73D35*Nmx> z_B0mrk0+a&l2l`HW5J=cMWsb?gV9_rX#j+|W?Gjl9Vx zTR2}jRS_)nzD8y@ijV9|%*zIffgfOYxQu28Lixg&-Pe>Vt=~^>tW%rMrrD2}Zh)B< zOG;sasDwV!h_$^qpQnRG{%=bZGWk%A5JoC~va0}IJN}ih4dg_)p$=dEs`Sj*tf9iFOPw}i={w(X(zMPB} z;MO3k;H!S{Ax#}1J?|enbL
3=AUYLi1zm#9`5f{OeSj(h#op z=9{DD0r{T$+tB%ihOD4)RAB&rrXCa{LPuPZFVHDnD3<3|#BkG({K%cbYL*Oa|ts5x>BBLT7Hn14jAVH_2+=ehG(xJFe zBbzuPuJp(jd)v2vFt$4wi)Zmz?ZxXIrCj3T$zwJ|)BC1f`^>vH^<13^ZywjWwkX`4 zC@R*&jB7y+cDd0=BM9GMp>4v>cgubLdVj>F%Y?!WoZXa~-!x`OZBV-Pnw#cszw7Fa zGd6ggN`qW$o7*~Rso$e$DxN+EK6xX+0n=mtN)MYgQJ4#5&d&7M3ls}c0jRw_<}1&? zeC){YD&wha5sC?!t!lNjaBK$+$T$95DlQX0$S!@NsKxCc<10;-q%&rYQ!0mUU$f)- z<{F@LSR?KI?!{xRMviWQOY>5`gFVVT2e^k-fT0mEw&9foGlLbLHX2Nr=Oms4Gu&jP zz7K4L-x2Ex(dn|J^6+qIBc=`pX_a$_}SqjLzJYKl5` z<|M7S>w@+7+_Qem9n?L1c#|nOb=STb&gIl6z#h7QdU2C)#)17aeOrJseN|x`<<`3$ zK3B%meo>7X^#qU!k0eHwzVt`~)PM?~*CU*A1Zo?-z6x(q`vsjEg>7t1F?HufGZ*mt z`3L!D_-!B8uUEXFpv$}Z^7;OO98i%j=eH=YV)np`d@Z7kNp$gVkhk~(5H{XGO-K@B z453}_1rmlr@Jd2xnvjl2EWd-M9b^wydcv9 zkNeyixCwE&Ad!ifCx%vG)(B<=afc(~S~$rBi~$l27eJt|z^BeJEO7&T3b~A81H{;c zs8BY;;Uz!slfgE-)y|BcTVYavGoUjR4KAk*svxD#cwqbRIfq+vC^x<8>ZcrOxPYGW~<<%+; zmGJ15N`u-U+t9N$>`)=? z<74J()a#eU6NNpOe*XtYUxq)%{s*)A02nL@WS3Bar5O{Fm|n9PBiunj=+S0kXe8JP zWPAaGEetb6NHzm86oW|$TZH65RzGfQ;Se28=w+l8L)u#ZAA7b^*^uyxpgsGaGsm6g z`^}Cs$LF?acT2{flrdGf3AUGhc8#r@wU*U-!xvPgicGl!{n$FS#Q>nD75(X?3 zdu(B~%mjr>&GaAOZvh@3RjJ~SR#olYN=2HQr~{0w3Q<$kbgGTjFyrM~jeOt$emstRg}(BBNI{ak;S@VAj#k?8OBsnVha3kvrwB!4iSuRW_}?^Eo6 zs+E&;zW0 zXnvwZ8_~F6{(wt{NJNs{B(e+9Cnrwnk!BGFPl8`ba1K690*hep3pP$! zGvHeGT7Pauo_~_Bnb=DM`IftN?d0wyR{sS(b=|h{cTT#On(*?epJ#%prWx}uqfHTu zLqX?}F>9DtS`!JEP^`ePm8^>gl6{U~ztN%ltZt1V=Xr0`_KjB9De$;F!S>cGqyDHyk zNL7w)C@CE%yy6Z7+`8(nNzuYLH&KC~UHRT}p1)_?%ZG*n4Ne0J0WxxxyWTT)Xy2xt zPyOZA_xLBaQP!*fNX)in@AkW zlKT&~?`cv&#Q%f;@W{*jo0Rs0%Cc%t#-I-xLzm33Gz2Y0lZJ=PhUT=twc25|x;9eu z(bp)~MejcA)J69E`OV=ux72amv`~n1p5E(ABz?YQ`da>zo*kW)W!wI`^QAklp_WtL zDt`)mO|Q0ujiH+qI-Nn4_n|PD+v=u`alg%x3(b6KH~-o5m-4UhpFEUtb1Nz1Pp|#p zn_cL8E;a@)zTn1+=%;X}m3ehrBVS5~CVMMW8t6NIVYK!Qya{`JvqZ zlf|j&F&S-%Ogh?@s-Jamc6H@r?T!%ANo(`Y_dW?oV*O?+%1Y07F_Dl8)Wtv@11ppSh3!CSOvs_0xgz^B>;cSe` z$Eu}_Or}DxyGD=dg$&tME1hB5X3!dJrID?a!$~(SgiqFxCkrTNL85E?S2}2{i$HaJ&k+bKWAncAXBoI4h1KA;yLfpbp z(OPgGGqiDnI*;!mG0S97@s1EWapI7!2u+9`h(k!`9^q$<@N0xQc+O0YP-gOz1ak!k zk`W`TF#Se+H6p7V-5a4PLK8$~!cQPAM@bFeBil6MsADDalw}(q=gB0MV=27F!2|hyFEy9Jg|}CvbcZTEq)B%(auM7 zNYefXI56a^#LKOtNz>w6TJG}>me)?T z1FKHyPjQ)r%3{B6`c4x74V_Lou)381n3qTVp&kw3jFpIsW|+L!*M;O79TRjmE>;;( zL^5EoFRfd6#jBV1A5FCUa{WyogeH~g6TU=TYDG;7S~{--)hB?EGVx${<*Xos$}no1 zNu^*JMr+SH8&*dZ+Ca>p(<-%LL$*(^_i5GYl6s{YqwVrBzo)5#mKt;qC$WYfM0^VeCz*|K9R92KOzjD#?Yi2j5 zI%Ub2k3KlvYV=s5lcMH1t@G2tsq_RFG(@&_cxzpZv)1oDJbS~t(XtlSyJVtC7YLW6OqRZ8N=Dx`KhY!&E$wob z7=v@GXR1o0p+;X!Z)X$%d$`jn)5Xp5iCpU@WKm(}S#8n0PxuuEmEG~fsER7v$Fubx$vMx2Tsw^lt= zu1Dorl?G%*A4%=*owjOft0|g~I)PWeByB!(S{Jq2XphI?NBFAIqti1g7FA-c9JQmg zD&a4!kBrNeM`GFXaP?0obzlwGRzW0jJ|>8*Jl(g)-ZCvKbAB*eeXTz+*98(;Q;-? z$D+ft$Z3dqxU)+PhQJJ`?nD0TR7p~Bw=5l;IGqy%C)3vx}woy$XkJE+LuWFz2R7R4(kN2b8xArMCHX7JS}WeYqL z#0Qd2=nf!5xk@cp8Ua2|e66XSu%>P!n3B@4X~m-4(45tQP&-8lv@*)5QRkc=0g?dK3?CJYl1OUnl$e4?ORvAoxmBI zJWyqdvHGXirOx&u>wx4cdxuM}Jd75#2~fz^NhM`4wioV`XK=7FW4+QiViG zdFI@FoNp8yI$$&QF=kR^*Q5hW~q_*f#)3DXHN zkI<6_MF8|e-~{Nko}CDGK&=wI2e8xRZ}YOluCA)6Wxq6^-UA|?`_VT^~+xN$9G)i2i zZ94_=ePVfZh6w<5caTyZP5PqAx#pIH#1WAPE!f#z+uu1tig@!r**&nw+~eiZo_bl> zZcDtr*D4J=hjz~CoQvn%+d2#}sow%wsq?YvFJd*1@d-$>g{3lX%bCIr0>&rTi7ByU z?$^h3k>Oay0=>rGVE1%;aJfZ!)n>av>F1?xH9+In(d1}U))C{yt1`n1o?Hm6v2|os zi!v^jLYos4;3w~W=tNDQcY4BEj}1UeOOwZAniWhpDj^O@)9n9%oU^$kq{V5j^(*yK zq1EeZ69|n8dsZozM`!il<<)LqUgb~%QlKB;2ZUl_T<;J;eRj~OrY$oYnUU`OZA}<4 zw=%y{%qME=M9~FF_HOo}MgKEjVNl3~Jf%}@%NT8~sCr=e#X9kigx5pt40RtwF#PWT zoY-e2;{@gy)LKLaMR?Fjkxiqbe=Z-kINxZ(y_3231ss)!R0}xm50NFO6W`HMjOOz;ennzC2=8)b;0j7RL-`@yPz$ zM{=_k)gC$KV9KgIt~fOHp{VqT z_`FjcH%JU&)6IIPK~Fi1yneTIFV*qTvqW|uGXZEb{^~~EX*qvN6#mT1zxMoYn{DV5$qymkQ=4cKtR^0@HDv+ zmvyJxAx&z&{k3hKg}(J|mDs#+xz`-7aI?5GIW54O?wfX`CWd`ck#q)g2`APd5++(S zz!wHWNQNAsO+k-%nWQ1kMs1X*QD?ApSDH=Ea+}6c0DJ24i=*Px!o?6gzaLW9-i1Qx zKXG-QoRffCqtPU%V8$oS7a$`cA|M(~Fq&a*o+oOtxbun?A0F5Sf#yO&%;`X)jmyyi z0f!_kkczk1pQO0MivsE#(N-fV#yu7^3(f%z*TQEYQbbP^5j%LhOvYUjn4Dqdgxoja z3xGX_V?{|gMYl+E<LrYNDUowR_zms2}wk-e`XduaXfI&&FXI- z#PTua*k*^UZ(jQ}0l>%Twc5bghHzt+vd?s{yK!l4bpE58>d0JDp%tqL2_^mRJ6tf( zTfRuQ@E+byAH3}IYMTWWBdiD$rN;&fJ&cheQr#h zt5`yg)n+SZ>^ez6zbA1aXZAfabEH1QsP#0gU(?cCfA&Ca^ZbxTrSI6=ItNS%qG}~m zmxQD?96Bk+W<`&7hh}sYX5nePt+9SfArX%S4M6%yrm9D4>D|*{Yh7HUqE3xWf9#iy z(#^M@JygB6cIFZpG=tO0Siia4lqvT(a%A)yeN+#n+j*}Mw=yw0r*137|3 z3!)xwNkh~EC!uqZL@C*{9@I4+9V+hQoQx%jG-2VQ^FdWbeJK!J3P%&j={Ru(+5`Td zGSCMQaZg>prxMf;wo9*tC{)ZdD#eo6nQ8MMj4^U?@4>cduvmWb) zJVQ#03HI;C$~$WYJvOD+oIAjNd-vbio%v}qB0Qqd6b)82hS{fgE^F*w*k6NPUt1H7 zSJy<@&Ed+BKUWt3b!`#5ka$s~hopecvPB4t2%!W)i7+==&=b8h<4DJWa$XQhpm5Ow zDE9%1!5l?zCWwp#xH^gg24!_1YT*{Uk}Tmw5!#7p$_@4kl*l3PUFiyl#lATE#BIZK zzW-uV`o^PAV|*uOt4k*g>}Bjz|72U)i`9D``SOk{<8_#+rEuS!BNu7_SWB+kwr6W~ z^?#nX+B&Z5NG^D7i8ergh4GwovxC9lZR`02zGmt($>?L^_K{W4PyJmRz35@=YX1FfwI=Fy+kX`;eZ0oD@UVG{8gJDdFrG`ha93~_U z{(gukgu7`w-0Vx9C>InEyy2is4;G&29?YKNd);KtM+*I7fNt)**hu_FcqL{A>;%6{X|TNLW0B9`=)^Yij7@&{!8S;0(ZpE@kz(QlQY=8{m~zp?5nxf+kIY(_ zNtAklWH!geh)ZcsKY?FA;A7Z7p;H5EPHuzJGPx~+`@=`%qT^PJqQyA%1bWKJz~8{I zaPgGWVFciG;whPyfOtBgkOwu+tyNg0)~XCGwgjk_kVyk^L$Nlr(y4F=7kjl5ad2tQ zXg^+nriS8Mbt>D_fWpT&c>2O5HP#IcF(j>(VCqf`mrXMAJSxv?)m$ z>Y|knLvoRqvbh+OC8h-szED_e0}xvqtp~gf5!s&xeC&-ida0^zwo#^TX|Ogpf}RG3 ziI6-VK}d)nZ<`@-(a!!j* zLfYawbr695p+8-NUG1AC(gn4oYNmn;uC?fNLA`9H+ANc3(`{yRNU>&%icas&NYYg+ zwHn4gGbHv{72$3x<-fkklAvo3wW%$2F2<0#pTNaJo;sk(HfYtVMZ*SFSV;1!?tlWh zWdkCOtZKTPa>q2Sc2_W>)KWH5Ds#~qjnC($v^J$CrPtFzO(Zd7{zm@&GU<0~sKCC@EJ6--ER0`%xV=5WAOF1K*3#77t86ls(TY z(R5Y~9u6-k*c?w3aBjD^WPE{GLP9pn!zNgUO9i1(3MWr6u~>1%Yc!PgqF{w2GDu*hGX2w8(t|eV*Grb(Ik-e!M@oJWM!fwNPco?e$8n8k;FFP+AkOUuzCh zTf`qJD9Wb#;F@exqOddJ%KK*<$uK*6RrBfL{JA`IX@6E zTYLel$+!Ik!|2$b2-POspvFRP@o$4P67~-i;9LsDnC?L`V0wXkQfi>}@<;!wRrmth zW=02?*u2W)tuO4vqU<{|vqr2IVFN-!#oi8fCiaBVsEzRj0vreLDr?1Y_Om-xwolbI zgXTX10eOz>TPrtfwDLtO`2xv$3D0fPQ4G&zz^__`*CdlpPc-R-)@J{>xLlkS4giSS zqfnRe738&>p526ZB;o*@6=uG&m0}Z*YUsmIJ!f zAZ;83rv(~S7d3AuGVz3-;#p`H=cMPxg}V^#5^EjoXAd{*&0q>Ut%Oux37_B#!QqWb z-LwMdkzhcJ^Q*N=eWxiSlR!unzpMiSm6TO!wlGH2s3;u^Z_9tgj?V)GLg9?wfFD^X zL-VF>4z~2o1+%i$|`NvYU>vo3om>og^>c81og zD=hLTB{er$3x{xXZOgUeVryV_wVP&WrA65qkE13;khHX9nOUz_*pw|Gk$D;e+1d%H zEPhKK1(+EU2>b}NjR2!UXv`|rpT9Qt$7U!+8FA81l_gi7=nW~b{*GQl(yGV#%*fj9 z@&_<%*`Pb}#?{+DS%rI@fJmv-T5g%SO`5o8g*nR$tHi#`22Op^+duHy3{vlGsDJRk zT|+`B1p#r!BT2JNtd;WlQjxG_&%gJrzu~+$TZ=me?@9vh5+I|=1%R|5lYyTCk~qck z0g_Ri{7X)(lhdbugiC94)^Hsj@{bgf-f&GKFw>QHG9W?%%}LPc&$B1QA(0Z?7H^b& z3&rKoJd-OM0^&n0P@u$qLcc)rl2Z$cG>C*QutU*1!G+(vMNYCJ)E*>VOTQ6Y=#q$B zi^qYI(H%IlhwHeVpg$m`AIP4Qs~oi>a{}=WJRiEEApS%GonvV3)XTfszibx!CK(g%i zmY3Ljes}6}TfoXN&PM*|H3jISYNADEtHq-%8rRwMIyicv7I~kobH9S7be*mE+%*JFWJd+w5S^>gc$|?1%y8FL)wr zdS$<6Z+$MLjE&VpR=4-Td2Twkqr*HsM>*8A9nZFQ9k1>^)1O|I_X&*3PldGIME}?f z%Z6A(4%|?!HCiA5EW|79VxK%&@Tj0t9#H$Gw&2XIN~6Ipy-H;eNOq`qU&+T?JX*%$ zvl{3V1G1YU$Nq{dKD{Y}8<5_98eok%WN(x}au5A^l_fXW{{`8=!=>>je&SS9U-OK^P$9U5*vp0=F z#ry2C`QkSPd!PXM6)2i2(zzGN2mS+Hpe-j*PtlM-v<5%v6o7D|KPp#tg2sR_iZUH2 z2W=tJ%yM8=MN)#}pcKH3+`tAvXK?TT^&1gwF?b8T!b(N?S&t|vcmY?Mt#t3G6d|{H=dgbX4;_Mwu2%B$q*(A$_Pny zNL7dv4ePx^z&R!yh6JX(*VBBJo}z;~%Bc|gO_D{|*j?dTwMvnS%&}SQ0bB6HVas&0 z&hCj(KT!p((!I%{cDrX&&?n{zB~nOBL17pfAA+EcS|1U%xVl5bPe$bu@kjvx4^pI% zZtATEoP}6dKu0?o7hTsz^?EGkh|6jTfskyzc%S$W!Zt21XA?BDJ~0uAK!OZh&7d~m zFvm!Mm8b`Zs0Akl=2IB{Q_Ld1Y}PIcR= zCU%0168{#*DN#+%Hm`jZ`_|XXD-BG_>WqiVHyUmAPAHB|8p>N}{BP_%hk8$KYLY$p zU30WcDkA4796k#yFU)$Z?zd|%`%EDcesdvgaRy>Gt6%WALc`FJnmU&?clh(MIu*S) zuGiM2tGvfQ9dDa60N7L${3F-MSwO{~bJWNpmn-a1%1V9&jk9G1K>I-R#lI zo+$Lx-1YN~K063PpT?!53VCN*fv?B-GalI!9`YF?v^th?+M+4Pn_#f$VkWI?Bs(5& zqjb^5E*e|y0v5?LRy%mwV;AocefxIqG5${I`ur-?s!Vv=fnz~Qg18*yOWB~+ z3SuI(6oJK`FGHlqk(7+>_?K?~^m0&7gY}AC`vvn4-3dNT4VK zwBm2JZFSd~az=)di2R#=&ffho`&V|^n>E9B4aM(ml8FYf6~DR~b6pjhn{1h<(T+T& z(yr;xFYWq<&3pXWtlyL0>+(tsQX%M5Ds!~U-xbvd<_bULLCPJ|TBBBbG%AKxl11t?tpNl|g0JX!Y;#Qf8{Z19Es4|98tD-#EUhdm14VisTzefbCXkRLXKG zxdaQ@WO7BwsH6o7M#uR5ezpAktZ5lb(~wk5N#p|X(a+ez(>;g$k-Q-L`d9mZik06b zoLiL*=rUGObsPT7uQN4tprw+DN#i}u`LV7o)rCl;x;pa5LJL&)@>GnCfqHtE$!&D? z3^ODoPz8#yIL&& zJ&&hUDM-j;5?`;^DpeAdl>Nejjc$vKH*u~q$Hwvo&n!yQx4CR33 zwXkdh;CY<}oabVPLqJ+|aRQoiiJ&=4q9p*;xq#*llmkLKtJ9%8XjoZbMyGD0<}8S! z(0;fI3d^AuP3!TfQIHVPZVA5>sDtK68P0*wCzLd*W*0Id#Njnv;|}$j15K-)Haj(J>~qr3TAN35`&ayne)! zSH_3`(+pZ?)!;AdKRS|8_dPxQEOux)&&haNovU8QQ)dU-y7GgDMeNNF;@B$xbczb? z8Ci>6g<;i%+%nFNeS4Q}xFawlWzh{TpQf?%#tIr4bO-{Z8@WMmfi&;$KSi-A^HOL8 ze&)&LZ?8KG1=&#bf58DUnxWVJYpa`VF&xJmls$rnm&L?NoEWoi;MPht8hSwjeudMDr z*-KZOOob~qvU^kv=B`eyFYx&@w*+_}?{`6;5S4%@At19|B4zyoI%jLMbxXJZ=ZQnV zng0g+#G~bDmoJ}k`_q*^ce2{UzF<*leQK$su39R#%msJ}ntf7CiEe?aErh#^T8Cd< zb{9BQUIv)*KSGlRz}f{2ZbW539UL@R$;VF9K%iV+FdRa}gNg|fWgJ1msemx+7P&!s zm2tqtf(3dMvjmq1qM*J50f(`4%zRr_acEQb4;A5F{vqP~rHxMq78pVGi-V zSk&tE^|@ciZX*@ps91pULY5T8Ec^tx$;AB;jWqTW9OED=D=~xk{tswXayM zpPd6${lSYZ;?2S*A&@Y$Y!|5NWqdH!iV|xAn+}#ym`D&uUT$6|6lS0`5hQ&8eUtY78_QJfS%@B(+Z_l`by zo6Bw1x2o??m@KBbPz{Q+&$17MdV^}EDe|`ZQB3~A^tWDGf6G%{?)CnB(|2dW7JD>m zw*~osKXN1FGq%a>l$yc%`H6T{)B9J>x^9MskX*R#@(ashP2BZzxlslVP`kn0TvP5g zI7S@WsEr1jyd!^O&h0YzBWHxw*sEBd!VT@xRT7!Da}!4INH0J2+&WWV0|sy;{y;qE zX8+y++K@ml2{`$DuRnJ8x?qir@E*GCwLw5izlQy@_-f&IWy{JoLid8}iZ&4}DHGZd zQg0!dAG~dd`Cw##;lQy5mOke!gOx`!fr2t|6wG``bKvB}q@qKMO3@vL^N%u6jxco| zIHebHC#XI};eir&E=@vOeD%(4fQ&EEJ5*x)>vwtBwT)F3b%0VETv47`=*J=4-jJ(q zI)!8Ss~2%X_Cc zwZbMwQUh?H}PeBrEl6i{#s(}>S+h+QW)=V2^fqrue9Z3*! zIRSOC3b{#Xp*oXRqjzo-K@k#xpb~RGrVG)O=Cl()4{D4HQj7 zwR9*{G9e;Cjv?!V%OgU{DtI`6PmmT+oGTPC@60A@Lh>h&zp zZ3zil&=>?sZXU9rew6fFxB>dF)BRvxcFf-0y&@j&cYU>j51p^Y9~%`ewd_6bdlML+f51 zvz<-57F@BY;iea^`{pyHLaqsDR5%|DN;fH2c%k#46vQVcaA6-fOnR4-F*3&J{W zO_Nv|QRf>LJ^A49@;M|AcY0lQ;C&N{-ao(k++{bNJQ&Qcs7!*A2_93iOeJqt_#hS| zhFFsT=e-c{nwe102!)Xfwp^?i9WC2ijE$Gl)sTFGg$Ib6v)G038O9Q3h_fKLsp7l@ zoOKB`=L*$xeI8ENgN$686m{Es&LBd&aI632wosl(|XNwH^?#uhl-In0DVvm zNQg=uVUSX4qgvVIbn-e~a6kmZ3xRN&Az?*v`Mq+3oSePR>gr5So2wXITv-u{@WFI6 z%H=fG(K9c#=YzGQPn=oVG`evsGd>;TtsGvtK$?!?5+!uT((34GBer^*$*&Y^%tpKP z_WgAmlPV{{S6Mc%Ctb}yJ-IGmvjqHBTL633oVWWRMF;#5Ne>YSly@z6Gj%$P(hdMd zVznN8so;^4K*t|`uM720Q`4AC3{qB4C`F?3TSiEE*slm4`~dTuYTpr8yVQ`~%a_S4 zL;l2}D`(YT`{Hj|$KSWe#%Ja8?PoF9Qy-5BO*AR9C@CFeu`lm#YijaTTifhbk94)& zQa+L}Ec$-e@7Yfq2z59tvL-VQdk%6EMc7rW2#VGo0K(_XvJ0Smf!`IHbC2i87wVYU~gu!;LAj!0)T5lJR{^-Fg0N( z=w@(VbdO@31J=VL$0MTPgnqHH73#*$?J7n*(7~dlK)M-#!IvPU;3Tx7@_{fpvMQ0y zR6oU3cZLq#vUJXb^yj1uJd6J;4Aaj(Gv~zL8%aJc{looZFR#Y&LglefX&_YrA#I#q zarBhfQ3-)4LrWjlHfRWG=Y@*un=Iw_uQ3m*m4G%{o~K-mp2RLj zZ#62c`oRuw`Jm76y2}D^KYD!xyL@K8wY|T&weAzI$HcguCJgT{_ttf(4N~xfrP$9E zaM>oc)m%Ova}EKgrrFbkqV!p zdQY7{BU=9uUkKf0MB!Nvt#r1zOc{oVjwFuX;)FyrF@f_6XLr;YR4!9iuZ#3m`WxAQ z1wAH%-^s`poM`FjT|A)hKuxRLr1#^ZKAVQFv-vfqHk5%IW-k!ug}*I(1@NWRm>U+M zq9TA~2HJuJ0p@VZdNjH6fZUmsFi0>JtxvRX@_g=Gl>Svbcgm&cf?~!lEM6SF7Adnx zXk2wUP(9~HgU1(FHZRIo;|Le1wM4LK?)4@!28uc&SEU6{fOQYuo%p=1aF#~>>kgBx?A8cu^EbQdy{Lsy zp^|TMKuv;zFY#F@lSH78fpw~oNUU0=K_qMrIZV>XwMsx-YG}U0uFnG}hG?6Kg`4>VY+H-LYS$qSZ>`mZgOSDgMTl2cL%y zWTci<3X>b$dQwZ!(dA|(U{kppgXe2*;u|!4DK7EQ{GiF`;tK-4F0Toy(f~$HB2J1l zMqO@tP26X(LzkX6AgbkRa6)P(?#js}Nf-7#+!$b?FxpRv0nH-CKi(Dcl;hS)V<^ye z*hji`QG;I9lXTRpAnhG9MikHhTF3{(f)X0^1_e+yAszGu>P<#ZOc>DWl*_u(8HFdV zV?6kT8&38z$C84ZIw0GwZ+9WpR6z3_(E{|b!+l# z^V!wun1P;mSDc&^*YYcDAWDHo6&><39q7k_a=nd8jjZJ~%4?a~2jpud~}4zC3N z%MgW^m_~ef$SVXQp{;mbUr?%mtm&M}%J9^YyqwZjsV!EmsiTH3sBkh4+j_v_6~sao zyImpmY+ih94Iz+0zhzRYE6hpGc?jeCGPK`fbc>TQPlK8A7+g6gLmOmvCbVL8gSR}c zp+Zgm_%z1j(T{3&Ut>~#cb9N0PvB%MVeF;VfGMccsOv3sl&o3hQ-_CwOU}O&Bz1-^ z18KIl$Nj4M-&c@2(~{h{PJOx|N~*du27)^NS6 z<;1%dLJ46~!08h!ypBx+i*C8bOWW=7uvJbeO-#&T^2vOIk#s_3%VF)eg?eYyj8sh@ z$h_K_KD9yaHu0oVp;E$lyf#Qg`3e%<@{s5@P&OZC9CY$2vxkIbRZM1JRQ{-LlM6AN@^h)6H)JkY^ZncDY4lTcMO zK&b)t16p@RHa0%{%#|I{w9p`o{0`gnzH1)VzNZ^1tYyQiFPNHw?s!AMnZbM5pMJWk z$1}|zX?)mkH%b^|EG1vF&|~Yfr)!}HEF+6<^%%{w&grB#y--!_bR2d#Ykyj}7wdX; z|K-W3bJ5dlhS|SdPqpR!$(HgU;746|@wE7aFb)tlbpTwm7tEELICBM>ws0eZBFS9m z^K{DL1r4Mmz+rUI7-dlxP`*|G5R?!kE@=n=OMhuIjZN`Bf|C?Rtaw_n+yaDs=_5zw zrE#Bn{iVWOPU8@t*!STYB`j0)( zQt84&E+}RcQF7WW!dlQ3ahEfWwV%6x_169T!0b$(H~!^nzTm31E7@<49Knd=%{O8^ zY~5e>{`g;WZ@Iy4$9cQ%TJ`PI?=1&6`HFL=SFb*sV&AH+3j>4k=`k+gm%Z}VeH<@%0RHPDuDUUE?<4~gUbz)iUSrFMOW#6Is;^J3DKN&Y|Wp>nrmm6+m)0fg;E;Xls^OsOe-5UEORh@MD1xaL?T4~OJ!VK;j#`wjarJFC)% zV*<+~JD1Iye#*n=RaeEEYwY#Tj%HP?J6KT=V7}X?&00O0NjM^!Dr-|Hn0fc|X&cVG zwOVh(6u(_@n}TxGgfu>tqYi4Zu>PuWhm5D87Bq(I0HH(qldH{tz^t86BZvsWESQN}a zBbPQ|F`m#<;dVuOD@qX92Fbz^C4EVZbGEV*>6dVw*1-`Hz@x%~%`L6)Fn9=eOXQiH zSPL#a5xpJII?^pg+*_bqmBbf%-AVqN_#)iv7wHvZ$wg!W^au@CXMwhgg=?-1VCaBJ zSX9P~-xPg?3BE$ZfqX&muBBa^ECd-`93%8P@B>VP&MXchdaOt&LW*+f9RV(+G^X$b z52ToJW86gUVAEpgvREBcouSKYDj z_$xK9Ke@te*D85I+qu2bWr-k^T#D$r!!1XTM4{ibZB5wN->2J83c!vYQ>sa_6oIM?WXMld;12lnE$a+ zMuZ2#0-aQD@q~4xv&kVeXhnJxVWuLa#9G^gdpf;xo6TV~Bu%(N;cj<{Nq~9^P$qMS zeQ0ef1{wk(5E6VpB><;5B_X|b9-pZcR~mV|O%G`_bMAV-*SOWUnL92jQj&JyrjihERH5h2}hS|*>Sj$l_6o9IrKW=7CE_z)|O{tY)v150f zhkI`b)UJ>L;1_iLTDzQw|8(A#k%4{LJ@wdgy`fxln9Qg(N;H-8%6Ff0TC98?M7OZ(pxT&vwg%|LYy(w8BwytT$5qk6k5TU#P2jYKICo0`&>7iRBmI(|-JRp=q` zkIrLMcy2|BB@T2oZq{StbkY-ZqE|3Zl5sc~MjY)n{gQL`D5Ir0t^KtMSmwZgXbqFE_BoGdzHIO<1 z0s#C!s0J0E3LrkUEE_al;C{HIC}m3Gazd<>#79Z0phX((;iSw&O+06jXrb*dcLS72 zi{cC#A@L!*-mGWj%2OY*54`gWcELx@xzR>GPuq0#SIgENdRZeH+df+zNG72F9?0z5n{>zC3yqD|=wxt1tej zhVQ{DvCyB69bb1nyJG1y=EWVWo6>f@RCM)f?( zN~oC4FBaQ1BB-*3o_-2a8wO~0Lap^kB!WY$ZaF*_^bBLhyLD#PN@=HW`xRDa)z`77 z$CwK6^W1iE6LCPe3_4EC;>fF5X+5d8Pbz0L&CwOjEwM<5BP|;dAeu->vzc5cN=UjO z)=O&}x2m9yj(`_X+=b-+)Mx^sD6jrIJM=#vv2*@bUJ#LOO^u(e$OH$%;elXg*=LPS z9n$fyq2Pc|=wb#(n)dh-39ns~u|wuBfTu%~6rV8P8pz0Gf>_=7$92JQp%4z$^5Qjs z%v&1{)$mTudTZ?$?4K?>fEm76{?|Z{*Js{79+lw&IpuK5 z2p52ETcGArqI5baXy+jl82r%Sv?7uK{-Ch+-M#tj^xpD{o}P-_v}rjogPvmt#mht^ z;K^m7p@asrU;zkRqGBgl3enJbiv#7IT>FbPP&zF%87r>3lRQMzoNpgS3gr^P=)y&p zeshZ}Tnte@bl>7i3s-QJQ+QDkM{`svxq^8rGU1d}eG)vT#HZ-hfKeef4<}vPcS#-i z!9ElzfV(0^9W?7?5grhnVw^J#03nKB=Lb^_t#L~)F;#~bFJq6e?_A4%yBIoYEE(EA zdU&lh)~%IEjWUIN-j4=mJu$~sfuFphZxh*=_QZVvI|QL-C|0TX%k+l}8_ze#w0gBo z9UaM9E6t=qMN4_*348Z757XZLGsW^|zo*6{)^^rZ*djNpUC`M(sIh7JLYF(16as|t zjA;y$Hdw={gSTqbPEW75WgbHcb#gW3l16NXS|_0*0Q+4Dv7VYtTnC+)pv9a-+-;{x zCBU#7dqm!#r|v>y^v4x+ZP&UbsX<#i`;A)h8ddL!)Q-Prw>zss=3oRcb=?+~OE%8N z%(OOW&{@~8ug`1__Rmkr8Tp!#d`q*kCTcWV0@jtQBBOUKw3%Y|&#SGpU8>h9k_x$2 zCnHzS8}t;0cJ(*~D=d^o4_b|}Ug2u6o0=`{3!O)s4xV*`K2!Ua-9%q}&i;gp#@ev&9{CcssASM?jK_oJ^v#%b}2nBrz#Zh)V@xGa(^T zY6YJJybB&FV%mj#XbmBe3&H;2%hX=YwC`_ZXHeGK=jLsCy}WfpOH*_IA>IYQdYwC1 z6Jfvd#~lW_$%5UbNE+QKjfVY$kJebTejPNdz2)K>@gZnoqyk{|0OV9Y4pkm+m&%yO zA&m;K;31;Pm3t=m05+|iDkP?EgT-2uBuJSo#-@vTvp~TdR{+U!OAqIWLG;V%uEq6< z%f>_(aiNd@N0 zWQ#8e5w=VFr=BB9v-hwb_8;p%`DykUtm=bPSQVyv!TtRmKBv1nYVua%H&yyvv7F15 z6HuF{o8`91qWVNzc0o=A=r_2^x7X<)2Wt&+T5Z;mqHo3tAth8>n@|!}QWCa z^*uE2?2EP56Y<)JBi$0v6*7K*ZPta24hWq&`<^bRXLm75j`T8Kq;8n(wr; z-x$rb27d4|qgk(E%rrY@*XqnpmDc*Q3yNYbwAR9Y&8XG1Ndb9QEexeGSmYFham(Jm z$QRENZ7C^QWqdB36nqy&X*=P(g`)QfBZr1n5oMTy;Iu&ACknZtHfl(C{@~A~ZC0e) zAErTEa#BH3@2unO;ak{y*{=jLo7rfZ{^Uz5cK@+w*P4Nl=BR^GgnY>ZIYZJ2`X{|D zV)W8_YP5fU!IX4h%$DA34o^RE+{;jlGj_h1uQ%t`eEWRB;7odT&T{PgM8fSzCERB` z>10x-^v!*9@yx3lB<)YVwjjUx*oGx7C;J)KNV0za`*8seTYXje$(_P*wU@D|JW7o$ z2rcLHjIk!>^xMPEhH{VJoHoXxU$NN|!1WqueJ+fDZSq(w8ibMqm{e zKPSEvd=04K>9V;Tq6_Nx&iM%;w&H|zMvlc?E-;!){Ba4K2*woxjhv$!5>F6&@{w(h z&O`0PU^F>4;OJH;qsIpRB9tA8b|J3+0shecg+(-VKk?Gie#Z!;JV8Y|nGys521q0{C+qUnnFq7k zC$f;Ok}wMZUxdJHUtj{vh%HH1K*k&Zxsn?gm{#5RDS0F9*H%~or`6q8Y zckXVs&K+@tf^KsPckjaPc{L!BDZAf3i|yXG=m`EOW;uM6{o}*)*gF}T>=Py zqhX)Mr%zcDyFBG9E9r#`^;JtNJo{p%1mo5Cw8vg~{Q#z`k-*vBPB%Wca9cxtG*;gb zjn>!2;&t^gws+&gidZ3&_ySVcXt%=vFr@cge1+I9_(z!wio54SE2^E)WE_IkB?S>l z*=mSRpafQ!sbW+bIdzecEDBRj6@YnxxdxxgR39$XtrlmmbY>FG6vt{)6~&TVMSdUf zI=}@JvREtn%Muj<}HCm;)&e=0I^RCq! z>k6SrO--2XP!K#wW*{VLaWayquF3iH5=Ns^$QhGf2PM6HxfuIfp(-3J)Q0iqSYy*J9wvSvtOXrC3Ekk@x#+{{vV$B_FW7%jRgVB+ zRatJ$%NCXKi3vfH2!?g4<~0=p7IHpeT!B;2Ii9JLc4?*JrnWd&X*3%pM-#`k94i7l z@?k%48)sX3(!@Yz9cyDPtea(;6PC5{L<(mXNQlv2uU-4`Ld{D1%;?7Rk6*v%#L+vq zY`Jk6__h7jRRO%h(d;rexX9_@NGJA4wLbuUY=8B5uTG~osnw>NwR)3A1B(&%4CqpN ze`jw#A(fe+kecxIfB&b~V#dTYKcQh3MES!9*#9g7H0|ZsWmwlrj2ZKl7lOhyhYFNO z6AJqS5l`lpcdN$t4fW4kzW1`#J6hwZ=B8A#g}qh?K*3c#J$QdhYa-d)oMN8?3pQ01 z2vwd=qu`N(w5?RWW^sbCW?B9dd8LVVTWB5hyo`P|S%~g8sj)q?B4C zgfH0J^sp+@a?14A6aX&3`|R2U&jE@9UXRMc~oFl zjQu7164ruQtH*B5+g*LJ+fO*XQHx0_lR_zJedhQLpjw+}m>ikT8?}^Lt5vD>lvblw zX|*fsqmjCrSgh_Xh1{+(n^ktXLgn8*JiObF?c@$LM8F~P?~C6PDZ%H^OQrx(r=haT z%C6u5VZq=+BjDtM7a&M}uu7vaX=!+oD|O1)aw~`s)f$5MnQ(W~Q?ps7*t+Qc+*6Qr zOBR_6CJkK*KR+0B71Bky8R7h`M2fwc-47jA;6Io?*Dv$gmFj`9!vj@I0=-WS&wX-Q zY+?D}q0Q4GkEh!vQERqQi~FEvL8Yq-1Ww~ zmlpN~{L%xxeTN77!jVXLcK6Zg{d=U#_WfoH)6woA6?&PN#JBN1LFG`~#zg4x@z&;O zqM<&){#b=FOrb%oQf9RqN1P7lHBzS;+uhU@O*A${abKg`9&5GpF_pm%Oc8}PeBTOx z4`5j*0LT8PWxp)@JM!rx`QwHEMcAP!OTy&x06r@L%60TYS`v(BiQG9Az{|-mCwRA! zLI5E~lpP1dUQAu*7pI&n00QZRTwE~q7?j%}9pci|0K5EPx{H%M)vJ-@-fNP>V5*C9 z@c*CllCD^KPsGNEzlm=l6GAW5k-4bm)<;lSnO$(3-OeNgSJn1=p_KTRvxfUG*w#ZmRZBW5bae8r!h3P`iHBlDY%m zuhUwEjAz-HQfDP(x;czY-ui7@%oA^mI@8#5Qn4-RkJPpBu;LdiMQUDb!){si%z*>y<4iHNOv@=lHoGIw*UY%9H2eTa&ri9 zMXNMsL!}3lME}y6Nv-Rg$?SEmg?d)N(lpuVE{&9w&^Y9V`d8>gJjP&LeDe{fY7bVn zV|v#eCJh7w$R|2F_psj_OH`j&c3FV8K&Aqi{;12SqmuLcNYRJKh1`wIhaY>FExu%at0lJ)suL1yE|CbIwSB z9y)Y!5%G?2Cv;Y==A_0VNUz-qEfqkUp_QCIGl3Xbl*>u*ZJx% z#PB8V{@-bEQQ`McCvG!rRjSOc5Y}5@43p6M;%FL0WYG2p+F-C4O4EA_y2BKyRs|cWl zG*H3Q|LW>%fA!=Ok3ETz6ZSPN>Zh33@2NC3j8UZ zmi>&q@t(5)gYRDd>!ryPwMXtSl^ab_Y{uP}m2XH=Lobcu54#mgg)X2mcSLGthSVRJ zGy!+ss13}jPPB|)Xlafon_CjAzWnm;D^H%h5^E~$=H%rw3 z@Mg4!VY+r*yq35}nBjzcH#jGkK;6}$iKb!js+`XoQY6tdOqdP8$3Ih{2%=Et|L#594tR#)^j>HWJ`N*lFx_e`0TZ{oDUWlY6S3!uI{{ z!hm|-4GU+j|NHi#UDyb~p=C1g7Wfsn?KwDi&-kYKqd9JuJNE}_=itm8DuvqAolQ?? zH0;xTRh50ytE#8-Wy8>c*#$LHDCDo9wRf&$s;=v+N@R4Z;2w^gnrUXK(2C zsx$1%&zp5hC2^8{@V(vG_6w&mx9l_rG*MS+B8_V5* zF+bnBNa)lbQOsVk@A{|bE&RztR~(g-qKzlmUp{)-V~1kX-q_!I1y?V2(+f~hp=@WKdB2#ex9_y_B=h3@urvY-8NO-1#zmTb14eW5c{yR12t+}O6X zHl17C6u@WC=#WS(q(t5FVsorw-Xo)9?_m6fP~H0aQ1qaFRbwK$vo^siYtGhoS65fg zVAYB8@_glt+N4;F-L(6QDv`C#RcPY9b8`D{1;1XsxV2&ajOLdXE^n@%KiI(@=-Sp< z-gBgGaBFvY?-jMVHui8m1QW6KHJPTDS6#mG!VOKEyAs_y+K%q%N_XyWN_IawI@;PZ zyu59la=5v(duvn8AkGfdYH|?7*pXZJ;q7feU1mHC*xVH7EbM@CpfYSC;}b>^jgY7G z7q}7)=Tu?KQB-M>pE#96uR6a8LfEUSVQ3MdU;6nvr_!5|^T=|I3oC6R;%VKVrSl+uay3au@0GnDy_ z2BIjsAYB!m!GL}QNq~+r9_M?3gUI?oh|pyeZ){D&tuHS+cArM>5mhcKAH8PXVlAaJ zgv_*B0Lk?GstPqZaGc#gd;T{5vI`5@7v4Ml_X>pocUv?L@%+=XZ~jmB#uFFI-tc_Q zK0727xM_9v95pq72ANl{ODkATiX~$tbg|j6*YHl=h=k~fmIpGR_TC>T$xGX z(sn;Sy{<86(YTlG+lOB>{sCB$AKkEsArvAbRD?*!y1u~1pyTbVyI#jsLiQ8(jKN^> zLVB(3HEiaq?Bnbo;Cu5fUQM`!Ltug}nyAn%s&B>dn6#SEz#vft1HgHPb=*n;HMnTd zkjCltLdX>H9ICiZgjoLi znYl>4F>1(S6{Bw~8GU>GH3?OA-r?E?$`WUC6>WR+N@ZqOcU6{&TT~%;s=JS(93C}2 zbmP#_&HXyN)vly6ZO#wwZG?n){xX}^t>hB|QQ_Q*x>G%4k*#gF-uK*PJAcQFPlRe^~TTNQjpRcC^rY? z15;9eaToDF!kJ}OFm5-&WSjvote=(^95lKxVv~kZiM2UShf&7C9D{sI56+vMIxe7( zE&-#58-E~T1rF(nAV?5(^MT5t5)vkflOZZjI%HJP7!Q$kI*sVRWbVSER9t%?b+1VC zFrNZV(pJ1NOfs5h5yv%%dAM1IcZGAoGULsf6>Q(%5bW!TZfcKM6lP`l6P3_tyU$jq zr!0cQ2Np|4BloiZz4m(cKX-*k$c2YaU)%9CMqYUelReZ8d*>^KcA5|=u5#K7y{P6w zu7auwfU+V*HW{sMt9Cf9R1k#8BvP1L=D&K!J+IAeJm0+Pm<%dVap~oLfk`gc2#k<6 zQep8~pgjWM=^RYssKrk4=F@uJ}#HY zNN6EZZq9BpD@5N|moi~3>J>!_G)}QiiMmff$z4Ddkeh8L(p1L7E7+ByFGYu;zWbK4 z$ADyz*n&V94Q)$ktsNC_A~BbMoJt5<&B(x%Av!D(Qs~2np#45D1}H0VyFA#Q;L22nq-)qL|PG6+uu` zTH56MKeKz6;QRW0zc0VswwZlq_WAb?rZ$31J-{83Ee=g${s7oUeqKCqm5>*6iqH%% z=gchVlx0x~_)=9^>L5ARLo1{@K5bmFz*6Z%$h>RO)YSo+BsfSUt(hy;NDT3sEFG}; z_1Y_u-JsH|`LrdKC9QyL!`-rjQo%yEG7jjK#Nk}|HGhoMZEc8+2w z!&tl$l+{ue%1tw`L=OG;LF|s{vly_3CNdbleFuou0AJ-}d!iV=Dz^_M5N{s~-^I)J{hTA8gl{H+;R_w*<@DXW z7g+cng$A%VGG0oShKq>L*ieFqLr-~pCB_%+CWFP7?9lZIZH7|y9IS6^Qf z?bH3{@l|^er*pjqak!Q-a~BMYzjeTq8g=Qs zn_oCFxP0-Km;QX;vc&|!lgjroXP7s9Zcvm1iRhlkl%rkk=k_1yt8edrCzD9Db`H7* zo86sLgBpjNBGWxV&TSNEAQ~tUh7xYqbVNK(XKUgHsaiU>Mk>O|RditW)X|+L0y!%! z^Hv%ul4n|Vqm@I(!zw9Xpt8s)1Wbo;&q5++j3>P6Kyju=JY(|#X?%TEdWN5aS|AaF zFUa}4kr+oIQtG)JF*jD@EpRzdXh(1r3au{Yt}N=HkCs0sr1@$+mC@Qh!uXO@|B86#K%R^Y3Y5rV}jXaNAN z0kBYDfsE-HPvxcke`V%faryln8bhb$4PPrUfEvI^L^hTkGfJ$-K!Rf_uM~9TF@@sMfzN)(9oPdNT20x?%w~y>b_Aih_MC2~f5V|Q1zX%#HLX=uL|w0a_P49Lwt??R z_!v6+@?TIA1fn1kmheJ$rAcLBqRiL6zHsv4>EZFi)32^tvcRR6bZ6gSK7m|JUgda( z)Tx7mGmKU(G^>rfv}T)#Bx(Ln{_!K`DVgFBPYT`Ya z0>yysv&XTR0p&5t!y=s(G^K6_P7?Dl;51-Q!4HRfW-o+MTFbi2CO^ugA%@R`tHSyi z(^(->IvZ|@yord~Ilytx3?PI=5~G@ivVED*Ajt385xBW^eIYo~DcZG&*tf(G`YDcf>6h`&+a_gd&Gl$hL)>*Y~Z_l9_EiIw?fAkp+u5;G7 zhhl+E%9tV4%pH4MrdhGLP<7y_Tn9o}cKoqnv|6|+A&xZ!9nDImeMaJ@DwR-TGr_1j zdB#s)UQjo;W~-C=lqRmy3AJL)%qQMxk*Q_8n%xPXGNO$(Y)m&zA0F@~mi4)z(=;hU zuKeN_kW7N*^=IlRsY`F(wtbZnNrlARYq!i7s*>JE9uq2R?%T(0l=AJ@cR1Bh{7%T5 zwxCX#&&7QU&xK-LNC?7LkzsT|0I+#ypg)Dld z7s>9~nM*Q%SZH}h_HbCmQ%D&4lr(WF_E3|u)*I-6NmQ|&_5KEZ!-_&A2jn^jcuTHo(Z&(^z;D&s2f!ep^(Gj)WOV=GPL^O(8U+rb7#^H_? zsm5lNv8Jl1m@B)Ub;bi6kFqL?gT9{OzJy`(`HZ z5c1_VdH1#zqqG&~BMV{P9&`%6v_0@68IQLjycFV$dFFlU3-TzqGZurh_S%Z~Dn2U( z-^)~lDGAqbm(3!Kg}9r2nbwjp+XPS(6fD3{7D2ToES;o8`~s4^TtJ7If+o|$oDM?m zhCl<*qlaKg3TMbB5$)`{0m~Ue;;)TRRS1&vZ|09NOvS#dpf(FBgs(HvzwOR zW?aR*I;amh?EK+mT3(VazisC@i zXQbqCrQAiXOPV_#>2O7t2lS+b-^1Nf?VWv%Q=5fe3$&%l!>z5I#j!~xTD96rG(0qO z?i(lCHR?6H^w&IGm7P6$Xl`nLwOaH3cF)7x8o0r&hPX;3GsYeYgqoR8?|Jk#=7V*% zv}zGDKixEZY5`b(ky?)@W${gr8%(%Hj9@fo$Vq~4Tcnpa~KINT+BK# zY5*3p=$Qmm1?w8)2(Il&7>3mQ5SM{sAmq@FR#AR?V*=5ZG)Wq+kWm z2i#bV&~Q`}eF00If%w>ahaI1Aj?=-qMp^&6bFzUli>DxtvhaP953UF-KQh!iCW)B1 z)~q&L8@OD;YJ-_11W_~YbxisBkKUeg)dT$YCToksXVqB`Pg{5#@rbBn$Ncr%Q^JEg z{Q`kq53Q~VA~(-w7hlqK?n4eT>%+8+;EVS!PX67Rn%~*Ibu~ZeHi_gyscRRc+3`do z!fEk{O=*slYtd9aIOXy)4+YKct_`cgwGNSyd9AL!F^1?xq2iR3!?Pu_NZy%`@}|br zkNqL(qbSX{>0A}j7xF`=4d!er+LPnC^x4C4&O3b63=N}1Jif-CQE~kUo!QC-Y?5d+ zVi{C*i5w~z5XFlH+`Y@K{tZi%HlfZjzu7;BmUgrUGyPVP*wC6@z1I$z)=w?zes1A4 z9}a$U>r{nWhY0u=w-fW#XMaHedmlpP9Q6M~kG3Mw&%T|x_+IAY!`(NBoigZXqnyDL z=%rRcra|YOB^FJwNn|xrpGX4g zXvUbB5v(GVJYIX|t$HFLsPm-?e7?w;4-(rR(bFQ$_)_MZx*2LIEqs>6=#9|T;TSZ* zybl^3y#W=(|3O^9Xh0K(SD9p63_y@(0?nWv0q_X?0~p5ui7tAYcndlL?yLa^;S}JA z5k6?Ltmgodpx~fW#Ro?MPzE2z4%OI~fZa(f$0SNdFkT+;6|RMqYOGL|cruH=5-|9{ z3sM?I!_J(+D634dAxT&f2%A+zDxPV+2WtOoF7!ZWj6iPGtAaDU$nZR~SN;O7oKxEcEig`Z! zDaN@Aym%sI&jo%Fb}`|5tzhpIST|h43)Q{&GY&4^hsm&**x)Y9 zH%cO)M7#i|l+usJXxsr_5=_2yHTDr5zW>Xgz-s`<47AWcy&30s&8zIHq?T}g=R{2z3s<-||xeP2#(^oUn ztCx>hX$u7jTp$1O_VL_FwBmNIkS~^y%EG*YtIlj2$s|TB<~mp7CN=Z|nYp!LXhp7) z$8XJEvZH(NjZ?M{wG~uvGe`FAK*W;oYC{pZXP96w? z9VRprs2K3(DbCT#y2TQs&>7J2mn@RjTvoWH|EY9yiuq^WUgd-iOzsYwPMSadQc zxIBBELe>)-2*}l5rA*BM$$PfiAXG-@g&JI5Xcp-VYgGlGVb;12{p(`-{DT6uwZ;e~ zGY9RWngD7N@Fg6bQ9JEB8^efkHL^xkTVLaY;u0AQZ45F0q0jRF3C@A1ksjOv9cTnC zpBU$bDFqV?xMDG(c=0PBdu)9;OXQ3R7AUX$0OQ$l$0?Rglu4OY-B=pKbcRJCZtw$B zvEZQs)3udgy9D5)B{{|xK9#7k{9%;C3OE=If_wA=XTrn_L+-jlfUAtfE57f$QSc*&D(Kp@VOUPvRd7 zw*`9u?R(jK@sj0Z8t{|A$M9)6KNmADH>j=j^J3|gzM+?Gnm}R8w$GU_v<7b9yX)uu z{J6(Q*#$vFH4pcSmh?+0-HbwI(op9agpP0`E~rafka%^?)k7;eJjd(?Y1n!{)lJ*F7C+L!TeU9eh_$b-G5h=ip46dJQz3}}GRS3}PA`WK z6*ydx#BLC8I@utRh~#Q$D;P<0W!tVf8oMjK=vZ4g><|fFh{g!0Xrf3YS!5MT%zT}_ z(E{T{-7ua*Z62{ZH<){DE`=U^*>*myq1(Vv;fJh_EFZz_Le7`Uk(Qq+paRl)L$gh5 zlDmnp?bKuZn9<>bP%kagYK4>`VNeN4QpQzr-=RW!hmT7yR~oCVHieX2mM|$KpQXYy zO`tY|CiDYs+E_E!Cv);Sv?PXve@tyEUb*JV{x5HC5?3qx<$DY5D@ENiQL@UU(~b|U zxu+hoLiI|Q+^UGWF71IXsp6#vT>`6qtum{WukR757gdKs=5}80U>_3xdIB@5JC6fCyeo|MMVtzyg>NQOXrNdF7P@)cnrUsUu zaUJQ(({j5wp|d79F_Az53Ccox=f6T?8Ya0aJ#xRss)_1MF?o$f*Q-n^)dJWBIYkw{ zl%#GTZ_R6UHomhe0D zlgVRvY(XECKOLxetm0Xixbt?!pP>29_tMD9N-c}`7dO)3dUzp~Qr z&F6ey_Z6!92EZo*2~4FQwW+lwk!Wd+$6MLISgs%cFOpUnJ|w8+o?_2%b}-@-9y zTr~9!a>(=&8FK;!3}P`nIV3W*iaBvwPMlM!ro`giazrspr3UmRBQwg-^~~4u&RDcf zwJ5Q?`Nj^-{CO?K@sTMVv6#aii#mGY*%^yDCG7izb>cmYH+YM=gB~E|6*@@Y?1rxJ zOYpq)ibEB5gGT%;^o4&1_l4(HKuRa%bXI`d3m9&B6f0J7J*>p4V0%RY>&6}`cq}2H ztO5p_gMtdizp4Vh3SJ2iK#+~bFT0CbeGrh22QsVdE6!xgr~~deEXuqWAA%+~C8>pv zsQ4cCUHQcC`Ik$YFr3A%>%oW(VYKo!KzIO(mzTiL6Y>sjhaO-)|B`v|>>n6}3}5U< zky~>c>yZ@QqE!rxtg9~c-#+_srki;?F<-A1%6TxdgyL`f>)7#MqxXq_p!{>cKJza0 z78Ie)`)C;ztYRK|fA}SIr%dmVy4!6q#t%v5h6wc9j46>NuanEU1*KGZMrn$;j8>Zk zaru>cg}PTD(h=cCvsg|vn4xOt7X~3&jN9avu6xaOT8YxsV%Fx=GJ#mj$?N6jwhv7W z3aO%7txzaQDd>JKz0j8IXa0l!&EcZq?@(~&kW$OcVZJVI%-ziVcHg|eZ@7IUC?ZE6 zYn?wJgNX?gOwZsHsy}|6`T4mF6Zz#ao{FZGyO>?fKRF#gnfE@LmOm}zs%2V<#h_O5 zwHmn|TKefJt}U$rVaKgCiAE(FwN5ToXm~Mo0A_Bdj1Ya_7J?Sf+^j}~KG15R&pULw z&JiB9L?wfIAJxYizQ}5pOX9}&iM&y$Yf?)T?-+#8tIq>d-6{4#F|8A2`q$u?%4N0%oV6gn<3ecDV)=)&OI~dIrV; zNcb+>F09=T_{w#$dPj*F(|-(k%`TomK4qz%tX400b?K5fUhbwVgO)|t47|adB_geJ z1Fs_Y-sTzeZ+SZ%sjshXt6G(BZDP)?*t);E*LUX+!HgbNI{I2`+ZMRfnNz*l>l#xE zzcQazJao&<72G8ApC4~qx$d_cH(bV9nXh&$1*(XpK+QgyNO^KyMQTy#}-G^%AF7aWdV6X zv&QI*`7YMVC)@;BveQ?r-E5XiEv6=$h>r3}4o}p~H2&l^=9U9FKF34B%wbAoYc`o> zGSjAw;?_IAL{nd1`YZI#?{ht)XTST+Ew}#mty^w-i~APy$qlqjVof^-nDn&em06KM zv!N;8HKaJa7!5DuOS`oiXWS~0(Fc%ir7o0h9b4)+G+*y*$|GxYOC$o-02MsK&Wuug zay$IY`H-}{qv9U?b1_|(Vb0?mDdL08zu!1y3D#G$2Bm26P1$0GNy)N$lx>oy5|J z;>&!(YrB^{t|f>uF3qDPv_`K}S#41W2528y*}qrlm+7r$6YWQqPNaVPTO?@q2AlQz zv(Unc7Z7XL&6Cp-4o~M=a05z73VfR87oe|vcyX=!s;}-Dc(BJ{as2ogF&iqzLBSU& zMwWVLo=nH1h`@~tgbF??GOvENxq8tRv7=j{{}|-8X3+1fVYSAok7ELi$U z00ILe3lKX=;fWUmH2B`* zcU=dzElAfH!Z$3>@904wI#sqrmoYG+(GInBN1RHdN{WW}g;pF2SO5Nxqk~tsFHu;P zh2^tbW1H*iH^;q6P1G46N}UG7`6MhoA*+2S!Tr%Yh^y*OmTS4A8fzu zk6m1@P9_R3Z{VMi$ZsB7m5%m}9BOs7Icm*nwYli*bT?f+e_ABD_L8ePa*5AB&0~o! zuGcsujfXeXua74-)vmd-R^rgyaMO;L?s{?6P-Ic9AuAagmeQoi6jR7Vibk=)_u!Jb zN7^JswMZt5S``A>=oE3*aQ%_Lyi0bBa^(1J_9OY6LUNL5!ncSjV&;u;y92y&OI(s8HW`pgqOofIUSe> zU`zmjv*ZmT1cSZ=wo=AD+cp4$51)X23_#HqToVo8EHMcjHpTYuIs7g*gAWn@G2a3OzSiaBC|_>RS<@k|Z&Tbx^-8Im}!uv!Lgg@j|NVc0nf zx}QXr+bF@{&)3<*C+!Xk5h5&J=eY`%Svd6Y&?kS2NTkHrSC?=8=h-Kac-BAmc6KeB zwt&<<(iq-L8Fd@&EAQtVMd@d+@N)4KT5+)`R>(E9_rNPKz7}y0FMVwndSrOf$fNq> z%=`C_)*Aeo=ycFbyYrV9|>uiJ$~}jdx$~9kQFI|Jn(oWF2e6 z3-xA8sMUK@`*0{Cvx?21P={V%PESSK$0HP9Av01^s#gP($vj-47F>c5#u9R&V|K)X zczjisnb{!F^TK=)4Cxbhc8Z|@OAzM4_>`8k-Kq36LAgZ~YF18>;)*O3>Ve6CfzE;o zJ9Kev35ID&-eIP%(Xz0@H~}BC`NbT}?wy#xOGJ)w2LC~JdK!=iqXwMH=Ck7$TJjWg z_V!s1=Nj&s{lhc0FXgo&B(kZYUL0}M&b_;9tM}}yR!GGPt5{~sypYd80!gT0-&kY# zx~Nv=nS0V3a&(1)dJ#cNRS|_MjXEB@4Rx#+h(s<3kRR=;ZD>EXt?s%H?oGmwSMWlR z;>z`f@m;H%o7U`DJmn!dEkrhptK+(p*VUZ8+X`t+5HDG{vMs(-sIL6X8?bsHHVk7Y zRdQFj-oLc9mFH7H01c*RQ>S3=PCtau@CdHmU~Jt78-^(c6Az#D-oX|rFxuiG$5Ngr z8_i`kTsQ|vfQ?9$^j+9q;KoU=reyG7k$OR`g?E7E-z!U(mB|Nx{=Z(cng(XPKqmM+ z>QuICVbT=l#Z*E8A?m!Ut(keBdGCSEpC0?Dxo%&Fm=>txyr3~L)|Jt6=05yDwAlX0 z!&2mZ?};M|2;G^#cOQ1Epx2|tx^A#})%uXl77p8Nq5t)a=2*=F6|J(~)y}=7t$q)8 z0pw3=8xLD(OWsoXlgei1_>04yg-`SaI}gqDO@AD<_GF$+*lKM8N4dpoFcJ18X+8PCrn-5%h2*!rm2n7=HqP7e1+M z6+y?XB5k*CSuhH`@7L#B=xH#cstG1huBo^JVnr;%17vW-2#~s@Cj-{9y-`Y{4z>{h z#S#I47X)UBF#)n;dQcM{kIQM};y0x;p)Gi6Fc0+HU;$lLZtw{i7APgBEj-x^5uSEE34+jNaTkPuTd?i7uid+Vor8*~!UnR97UnA3-9tAh{aUH+<^D_GdhoT=n+5{Q0n z=BszPMG~{rp(?(3Kf$5@^%kblaG{ecwU|`;aO*3H9(UzTT4Z`xA*+D(v&8y>2Z17 zb^>G)RILKEuvEezp)SwVo>tXl38LDk$Yhob-Zvf1Tr*U(6RDNSp(S0~n9^Hw>P6#l zD9@3gicb!_v0GgkpKjR6ymzg}oVG)jM5`dj{O3xfC!V68xO;r}9Q1~{YM>BlZ}%<* zMic64?L#|%HKNi++yxCK@9I$J>Y=q;SZ1=@N!gq?nYA31!`XwLLw`ETEM^8gu2^3f z%F;P1g=Bi=D0&dxe>ZdA*>fg+Dy=v9VB}Exd=vE_{wEa?Fy>ZZ`OAky2e2=|ngCNy z;Q}2R%STqk!scPF3b=#O4n!%dW;q>P?u3r6aMc0|;!kj(9fu4s(m_3n3ws93xW}$R z{ipYB=6B35m``^o)PY->Kke!N6m_BXk2Y?#$qf4R{B(MCG8E?z5FPM{?h!Ijv8NE0C>&&Z4 z=w|o)X6-`B)*OQ-YkjQO1yWE_T13|x7p`sV? z4nc#e`4xNd#4UssmUOm;gn2FmE>J6rAJ|Wph}raOV2^ASx*K+v#icAXpumirHRsAo z4%eQjfqv$A)+{T#CPOIr8?fI16P9mdD-gef%BvC!fbEC(=-F_*2Ie=nZ+d-GBMgLD zFdo**RFYYNHC5e9tZ-f7g2fx#`>F}T^;d@ZjJf008<-DCd*jXYugU5 zf9E;onzwIa{-uzpd4mF3<;DX`=BB2haj7(H=Gk1K>TJGHH0#XCOS+I*GCLS;%0VV# zX!NPuj4?yu^2qf)fvw7-Yr0xa^SL8)ikjk3z!G#iMXH)cYoWbUlxYw&h8x`@-B&NV z8K&pJyzuz+;q69eV2-Eq7jvPi;8CGZ-{?%l;wd2{+Wz?bQF;~M1hv>BP#AYd#pBS! z{tp$Ou)~u|_zmp?S=b z)~`<{GnF|yR!G>d@<54YA%sZ+$|kU&2FT;@b74HD3m88zAk13;Wy-_`ZGDnSO$M|6 z34&7dCk+_98fdp_KI^BNcn3*_S`h3d3iNs*KTJ)py7uPAU1$Hz{P{WNuFHRg9=mdS zbG?1min@ij!XUINbB%r7v6fZ9()vmejX^}P0e2Dp{nBs>*Jv3X{;;ckY$>~kETP0+x;pmdlIb%{mP<3~ek}o%gziF=t?5JGs zi%>&GvGLhe=Adtzao*?>O7A&2qHy@R_wrz>QEw8ax9Z#`k{i@`i7&rszr4zjIlpM% zs{JT>X5ZYF`ppgI>KrN331vTG&b_bHhP7((phBy%EA(8K(v;%6%`o%Go0Ew3)YhWh z1i*}j^`h#lJ$AX{tTAbtF;DNlq^&g`$jHqGljNrM1Z38VA>Kih zMPMFG*!cj&nU54ID^~`rHgzR*!3IuUb3RUe#{aCs4mGwtET?82Be*&iP=;W8j{whx zzL*&-wiIf2>A)n<&bY9HTg#t7TR>Lacdf#a1=Z}cnK5#z+l@lu^u|s9VeUA&gZZ`& z$=*W^x4((>I}e>;P89Ql$DUZdY882_A!P>^u;f^uM5I(m6#h-43sYO4pDAfryJq8F zglFXzTepe{ZL+wKm0}>-%Um+cYO3Xp!mgaxKJOsWSOI#Cahh80*J^2OmhC%C;>-|^0eRZa=;jqvCgE!tA-E^b=>@Rg1 z%Z6Wc?E*?wDiT@~{}%3^TUyMn#y77Zj;-t-KlWy$37VMemhE?&h56?&!Q&7mD`BpO{34aV15)IMuD`|(t|s#u8zRj$2VTG z#-@69)6BU+R|Ts04YQt}MqUc0IB23J4u!cIU=&twpqn>rbcxro(jW6mmhD089^+XV z8dz2Ja>}MU=ni-@!AirTO9gygreN)8C{X&0`P?huiTGlzH=GOmYttM`Y*d=vD#*Z) zwW3Ok$4F3WW%{&^Hk;GZ`C)z6+w0e&r#|g%ZP`EXos-Q~1G=oK(pvXsZG%<|aRYjE(X%T@hAcG) zU;g)Bu0M!e(fFL$+~~5jf|AwMPmejP<__swQ=K1FjHoAY zs5$HeP19!ad2?ZXsLjsw{CLJe=H++BXKno*83}|f?b{chdtp~^v3>s3<|XGo3e~5R zvHleEa7`$h&p<~1mX@$1+?=%T>yKK)y>Y=WJs>WuDo%e&*G?c~)bs z&%MS!^QMKw`1U#NwTEW3n(7mE)1a~9{1@8VGuf%Vxp#-+u2f&#pNdRPSfc|mr|cKjT-z72HzAFq?H=qK8ZAhK|3{QZ1k(gknkvT`qaYj9_(?9 z*GRUtI>Ne$_D{&dyz`#fE(tjL_ER;Z&YnlUn!`YQqQ&PIP|xwlfv1nKLv-=vqX$cV zFi^rFkS-a)e792 z|Kc$F#jh{mTA2v~B4W&OC(618tKE>iB;F=G!xyiJ&j5Z|KDC7Y)N|WAguFbc>u1k1 zvy)v{&Y7-P{Q+%#>wUEOF{yjj7!+4f=|&EubV6cAwSCke^{4`pSulrz!xzZLH|XUe zp^#@A+>_*R??>@t$5G+X*_F^Cth(Cot0rD5`2AIdfWN?eI;SqbRzSl*T|>@d&j9nI zDKSyU=SSm3n7GP2ouD~fPC6DRUV%Q`PNiH!iTNZXfEi#=*Ukm#&3}FM+Jd<~%Xx)8 zbuXc+pB!V}cq+Pb(~7~?InfS3tr01s(q1vGARV-XQqclgHeO0J;kS)d+3Fo9PVBtq z??2mER~L=c)kPq72krG6Ld-iYm0rk{p`>cLjD}#-+o%Y&t(vgbQM7Vwbxv(a%dZS4 zivuv+mcu9bJe}1l1B94)-bcO8UkUc=I(87&olunhGsyFRCc*$(FALMsBOnD1a^TFW z2%cmEW&?}87m%dPU%a@RIsgiTv=sb-l>l8Xeq%{ntNcC~eHF|TFT<;83W=^&ioRl;c71QWPZl%K8ZsXK~{rDbXt#hh< zDlUQe=}5A!UnnUdlGbLE0wUSZ_t7MOAF#I}a1yRzU4(bx8evT1UH}U=6$yxj0UDGT zOq%W7!$Oo>QLY#TCNsfWoMlFY^(%15Ly<3uVT%Kh8l22gYOkCw!Fz&Xcv&1IFXAy5 zIK>5K1C^=xHZbP}Tml2P%zf~?5zheZod1IP@r^ey&;C0Nt!q3Rv$F=&{6C?~ zo~IQ8_>a5<3^SQKhk2bK+KcjxwCs}zL}M9$(W!z2T?ymQ`+fPm&z~pW%=!H}{CDn` zN*YG=7}c(jkPb#QA?5&cr?=BZukVXR z4A&gJHi-o->=0+rO)u+tiDrl+(-DU%aP)E1gui{rlv&{r*%BHF~ z+K|T6<~rR|7ZS)aN(EG~(Wtgq9}3hLahu>ss8|gBjD&%&VKL0?jN-P-Q<-O}KR~Pe z7?l4HRV;w~hXa5B*iyg@1G~CP34Wo7({JEd+-h5__UTKjk z3(UI@`^H{+=iBwP!uTQ@xxtT*Rm-_j`BmpWsmgnO z1%OhHSX1zN3-}MxpM}EqtlB9xio_~4CD5kK&9gJ$)O1@cZUISWJ-##wFyp@ugM#f) zZ#PVt+BP_^Yl>T|c@N!ytd~P*ME$|mR$|{8v84M$=G_y=0zLi}@2yyN$=dNtK3pL) zOZ~rDE0C^Ob-b<^3D>aQn&G)7%seJC;Vw&3fq|qI@+#wAfyX1DR9?RdH0|2+-_n2M zeSw|_&7-spWiNmlSK-5dyk9tKT}FO53QQG94ktliK3k%dl46b_$if58$7E9;*gW~& z$y@;T_{HCj;9z-K(**q;dZS_ryl^EJC*WI?OI>vA!aDfF(mE60ET8YiLAFU#fy8~U z;P+hkRp|-yfxTwBB)stblyGtaK{1koUdAF&Noc{53wT%h2o8gcD<}A7!1W6EGQm#3 zyM=#*mUW_+*1#OfXY!`J*o|)CXxT86mc25*k7S99VJ8c?UFg5rG-e}RCE_3s zXe&HkK5Ys4A^%ClLoo5!XH6CqkY%FP#ugP66m2(X1X5I4rL(%-rdSvo*!C|Mf%B}Yu=xYREc$Z#@2gshEtn`?yd4FLg>{swQPmW)z78s_r| zz2cZfViEBqQ4^Oh;*sDl5DGOgV?GtoyIo?JTNh6AxOS5T>Igxy*@GIJLnRQ}g%+t@ zO`E~dOG3IZc<}|2h86LJS3oZym{ffFq1?lTVoDC7(GffLabwkcM;Gj`j*V`x znVUN6A>p%N&^e)4jaa*AY(3#~=nVzAMN-#4O{?T`DX~l-GaE9gm3`u&W%&rV-Ys`R zNDb}LD-9ZvR&0S0)MmY}O{r1(bP0(4 zcs$D>?a_Q|PF?BonLR~kp`MmYG%-V^p>p~r^W3PzVaruHmiQf^kVB)Nt5xK3Hitbv z+j?bpjXn#bfxs(OTjMNx%wC~{bW3D*nF(_BAsd5B8k7|DOZA&9dSkM971~}*8g&+f z8zzBe1+)sduvjk^2(u=I3X99AJOL9I2%Ufs%7r;=?PdA91`IXunh>Z+NnWUwQ-s9C z8ArTPJ}q+1Y+t%#$7siFmq=cBWNhHtdL=mgg&LJqBK9V2GNpa^s+}8Dc1heXmPl0^ zkVYDL^XuJusFk)kHMSYat~Fflx{mll4V?(9W1V^#u%4HgkLmgRf3Qizn2i8e0pw>j zLkOf`I=R4e$}9&To%|38>ms({bP%uzXr1L^PBuvkpUm2sWj+NIJ;50+T4^$Hd*SHh zRd52&e-khc?rMP|S=z?x%Otk~0xjtY|3$^6&=@^G=d4`QVpK&PR*7=8&l!v+J#|$= zu|g%~LY{tg(VK_|-0rnXsnr=)nc7xXx(wM40c?aUm_ua#(jM?4Yx}aRJYDKqOw-N13ymHDHzBZTAhZNzM;H}0UL)7bXr(1)!7Zb&X zn6us4Xwx>Q+-`5w)YbB)ARbnP^y%t-AwP5G87Tp~EB-a9f0o>p7wexM zK%_L-;9z>&ZCtRuv4aVu2JW-YV?9bGHb04ste|kQA1WZL9A?XF!HzF+b7yxu%o1syRZ%6V;4Da^%^IK4`Vt0aF{VHO)Aas=^8;pJO+P<7(uatKCIRUHo1f&zAo z5DP6n0=l&ctO}u+=Aa5reI{Ap2;~Y_M+b6EaYKG`h{{GY4FjSuO!bEO@t`$pVNk}E z%qr?WN>q`D%!=b|dGf!Z8;coLgJYDK;5;QQrEK4T;*9fNSvwruM#^N#lw#A;*f=R9 zUJ&3uEwk5>cXe_l{ML)(rC>{8^sgaV90s%p5`dQ&-{9m4y${Tzi@Yr)s4VzBECI`a zIs_j8GMANhjIL}f5EjB~1G2J8Vhwl<@W})ZlpMGKf#2YOtpac#p8?+kE<8a8p0pOf z!Ia0+38sdG2QCAF^a;92W)nEW27f2Xev#V0;FS|;Z_=)fJ<>Au$uw0HG_PPje|68* ziSAlse0tX_=tCHk`U0ALXzFlGn;og!c^MPB@uN%EH1y;{D!oiKT|KFA_e2+N}c;a%P_mXsT@@~^z?mo_c1j;J@>o~1-`l;{d8x{8EAD&41A%>6ZO$5drO3-=_=&`rseTZ zncsN$4R5{{SuoP+<8$+FyV7#au^(?<{$|Vb(RqpJuwN_>*W`o29CR_DAP`3hfdv5` z6RtXIZGDSg?u-w^r`7F6Yf#bP)p1C^)p}D z!6ABbGU)4Km0J=p8CWQ74TsyrMwKREwV5H5TG_|tY<#maAyi8FhI3Df1uBYmiIYCi z9NZ*KLkG_Q4IQd^G{mMLF`L6P%VjkpSs!!0q5vR)qG<|9TV@pS`Fsf}0~ewjDvsc8 ztumez<`)OAEV}I@p3UD3pzlaAII*3xuO1Y)MPXE)(2!G!MZpU$C1te5$%mVS$<7?7NnwWSA`q5ATD{X)$O;(=DO0g zD@LNeRy`P^luG3>2rBi6PU3uryjLBXLUGl1U2BUBm6I%=U@Mwn%ge_k;jwMFiRPQgFKS_#~aO+f%tY zW{xJP?to#<1fr$lk?~A49vh!7meSDQL8_11)S8feMx)QutU*G^TYY(eR?)d$XwgLJ zRFset$YcVvT%(mz{NAikEgWWQK71)K?9&MQQzk0B_fokkREu4_V5;+BI2H*eF(Dv` z-bcDs^P2Wdw?vgO310vux+)n-a-at`kq-v)g_uNTx_7ZypXgK2w0PR0yjg8()@d%= zjRsKrykM3~+7`cmS;t1^&q@y5X!C00UB38y6HVog8E{`Z=nEn!lQh#MvvJIX2v)e!IC&Q zh(VbFP6LGE0j>=!9CE}ki-jCjth~!{n@AOKF&FDe#Jole9o`5?X97#!>)^tu;1YAk z%)^BxLHX?k-JShXNqcwWt73M?deK-#9d7CSdin)M!@8d_Q!I^;cL9UK?5cbX>g((4 zV5E5TW9Ik+hnZJC%Ypo?IW%`VE&MB5{ESdVgX0CdetagR7Y#GN<<#90u2P~l<_FA6 z|E}c{tKLAoOQ3P!w$rO2lQ*{QUet-4!)AL_Xb)`-X*i?LG0gRHh+rumWnOs=wc<&B zxxAO-D!4s`Jk0a^dR1%xW|0s=PnJe!?Wjl~CF(#i&osq3)T#Gi6A2=dN~MSg#FQZH za0dmHxB(^>qoWcS&4Z+LTxD)K_k|FuokcL4zQ%|CvJesXFlW~;{^l8C{)&MW!q}|X zACT%cf?y6YzYh-uMQf+}EwU;kSaH25I^J~WmZHseJ@d`64zvaGHKc zu2vs%w|dke4Zjf0HadkN@S|&^G0mG!D6;m1MDotu4zMq1xqE@d7@7{LTi>1U1-)T= zMGg{XUxTErk3oA7ARY1m3bL8cAZ2h=10*K~9mWq26g~l%u}n!~2Ldbb*d`{punbOM zr6Z^S5}Zo|7ywQqPFYex9Kr?ZA8-aV>H&#{BO#;%6cNyY@%r!tSd2ZVkQRXxzpyC7 z+IkIq6Tbmj1{b#QOHNz7D)x)O0cZ+@7bxTJ#X)_@GXagKd6d#FQNKhle(9qv zhLkSUrBD)ni<6lWiW{PlP6dQIHL@!_5PI+`74iD+*Xkh>rZ7}Li#SVOhWyMU_Bb^5 z%B?Ze@`ERnkmfF-DVZv0Fa#AatSD|Y19O>-?& z%QFTJjFGei%u;^uvU;$}q|hi1YF@G~$@(US2W@eo_Ss)~_Q&`sSMU=2p=zmfy&NWz zSDEadqDjo05eWoAmm@4>(Y&EDrIwHqah;14qmvSlRJx#$vNoajR}ZNT1m~(}rZxU> zzmT4`d;9kHpS&k`^^u%V$dS&uc8yJH+%xWXMvc7M(8e0(JHr8YaCf9{uCGoyyuhII zhw)I{swW3FEgDNL46eVc^?vl9Idzwqw9wJHb8g*qb9O@l+P2|-`QfUmy{&P+r)BrX zn^xR2TR^RCd-&#AsoR!n6%;RK(|#?5S!BbQikT&g*Cnce*Q_>)sQ?lN zKuN=s7)lIulDT5dkq^fvOvVcs%5u~ahL$J+ANn5FUqk`a*Sp(EQdGqZFn?#hVOpyO zcdVQ(E_kOMe)`x!pV==GS(lS5?^{EXYnCu?*wpj3G&LUU(MM^eKrT}TFHab3F1u#w z^JL~R)b#vosPm4~&;}gT2`EpB@qL(CHb7P>@9ZHV4C0p1YN^Qw##yb^lS8sab;ZqM znwp)rW=uGFK*AJ?!CMZ6`;wPV-3g9gN+Hn7gvzB7p~x;2DPJLPdhwG__uu^O>j#MX z_NZ3N&&M;3JWgFUThPX|uBIbmNYUm%qJY}rknEXK<)aiyr4;f8=+_$?EHx(f!DEo6xe9lQUV-MD51I%j(`})CD{Oyb0;CSLz zRQJ}C+@xpL^6RFXVHl`a$J}jjLkX05=Jm^G*;4#r^fS~B?CuxPEtiU=@-?6(pZR;G zAA8Io#`t1MM!O3iY#f}kLn0DxX!6uMG-8oJ#yomK4A+MvMdEMTv0~NS6e6Br&W~Sm z{s^&PSLl~iN{9|$hLVsc7CDIQ*DkW3+pjX2RK%Mmr`vEY&c0sl7`vYN>w!4s%I29J1Zv2n z@*H(o?P}##*95U}4pmo&*eK=0Gp7aw>RP>0M9M|Cmn3jJp#jd4XZ{M!PrXnw@iro$ zpV@SnxU&P@STf83(5zMkcuMg9@(41Z^2xLE8(wB2ym4nx4fY-HwjY|m_6cQ7Yca^6ckaO*GF?le*s0<*WW1GDVRLO=qrh4) zXNn@Fy&)WHRZF%XP)%pPTj;SF)S%u`}t_9T0f5JDIsKtc#1 z?7cU%4bZ_Xg#v{#+EPks%Wg{t&=yMK`11eWogC8s{v}wtJH31N?!9llZw9=%PQ8Hz zqoH7R$Jfl5y(tl~*@HFK;hB{S9=)(C+i~9PY+JxCL=qe4h;I{J^V+TLR~AjiaOPHh&9`ApAKy*)oZc`OYcb`=J5?K>BbKRgDdtz8mH#^a0E?^|lG z5Ui!)Xl^-VQgP}^YfHz_f_=`$V4XacdGyszGINWAG-aaF-} z<#KUwM!UDk07=)Z;32Ma)D#A;u4`IRaTKL4*j!rV_Ny36K3Bg^G$q8} zflVlzaj_b+c`4)bF=_*tZWTIo^QjxZv0j$za;qc;}+@eWb?!=wQQAkmL9=`!M5rf!96 z(tyiE0K{|b5+ij5tz>#2xd>y@^6eE8o+eQbAUa`b*wVYJrLxWo|tWdG|ODEE;F5WWZGG9Jur9ax*FBdv1V@#u>~Bk%W5(1N=pdR&G90qt zfbw)tUvyT25t5vqoZZmVvvBS-dc!4beN}k*-D9J7uMDT^S|RvkdGEm4@#MA{GZ!(P z#5SAFJz~doOKq-IEl}a(nA6Vbgw`R4APjc4tvoj|T1~A9xLkVjPgUbV+T?PDmQDV# zt0I$ypd^;DHVT!)B>i2%uA$k@4NhW@3Lw2hYh3J9t2ps@&MFpEYpSHxY_>5#P4$-c zI-OQO&Kk9r+!q?QS#65j9mZP9X!dA$4opdW9mN=Y5Jb!|Mkugkwd(a6Nc+K3oQ6jl z11o5Cq)LDqQ#G}vHF+y!STsR)`o)Q9>)Rr|p@nyL8ZMtUe>E6DqJsfD{O38gdVbT^ z2KOQHjqf$o&9(W*gB2I%{c*^{j}$TU>{$k)FOXD@cGWEFRHcP*wY^w$`!siqw%rd}P|_=-7=1b7(i`=go|+5Ne(^ z6zs}Vx=JT!O!SSm5I|Hc%%S>Rq_4+Z6Ev)K=ISpyx!><+HDS<4nCoLI%}d;P&~6%c z2o)R(SUf{+jlW&osdAas21xG>wM|Gu&)J;lZW|vCWM+BnN|3hg`HU5cWWFgLhu8xR zD43Rjqv8#)g?R~lUO!QMtvCgn8Mz)AARZAdqdfpJAhoe*0$D>!IfjuOOuj}bW*PSB zlH`ebFsR`%nv{L2ru)=K@m-i{xT0I zKmkBu1jpDV9hC7=A{a*Kzl>NYU9zoonwW(-^4nVq*~|H^ZMnLLJ#c;=Fi{KQa#t3w9X7Q zq|mulz6$ZPK)B|tyG~j%5g4hza`Ct;nA%g@-RBT=q|P2~v_~7B4j|@o?InzXAy{$U z=ZCY+mVTsvlO!%aXa4yu%!4Sm6iPW+RWk_9cU5|-zwJ%&*{8)sdbDWho@UP2!r=iu zUqLy-eix%=%%JDsZ9fqwQ78%(%#1!(W9Op8;XfXROzT#aiD7kSow3o%`E{0QE-R!h zIa*^&v+;c+wPeTHt@p9L8VjS&EikGW-SO8FD5Zq>d%eK9?{Jc=Hs&JU8LpWTtmZ;N zRG(Z|H1Jk`+k^E+HOa;To_yOJ=V$4fDhDL8O1CuBgx_x(x_<8MEC}Z)e#%e(SpBH9 zF<%Ffp!efVIZ{Ia3`mm_y~mp~aK8n6tR!WsC2j`X#|Y`pu!}#XFoMk(93d34*2;{5 z8DOIf!2qaWx51jz>JXM1&+w(ifQKI;U4mNxJg0guvmOk?<-R0W&$24)G#`q78^|Jo zJN=Y){BT{XkjRQNpZ}G3Q2gqk67ho8YA$%|x_`RMFp9oG_LtG{^GLk)n$_ZE;@77X zVzCFx%F`ygSUdlGlU`WSm`ow%IsZTd&n-dTxwDsud!IjR)2-P=Rr5gJQFPW!L(L4! zc{qQ`X0z6L>e_#_H8&4j4QP|rW1WB4qWAXCj`?fFO%C_ruFknFm4V?tF*>)DY084J z)eziD9@-{;F8*O!U0xlI3@^J%L=8K1YPHJ530BwAzWqPL`#A4CfiUPRbMuN zHi^%OZ|F>{mEo&yAMmF)REJqDvsC=_kNe_2cEboUyz<^HP2H<2=t8B~#F>&Vo7KCl z`@&apanQ%6~k@mL5Q%eS}|JFwv5sTT`dYd@|@B@3Y~Wq zTNQtWqAn3M6YWI@(RJu9^h5Ml!cJ5Ybwro6x59D-E|7>d-twvy0YES#FMC`Dr4u*@ zQdFiy0vyWVgKNMvlK3+O?z>nB!FwGH0ho}LEq%Cec#I4qm=XcN$TAa_Poy$3Wql5q zCb7VPQ|?JuB*2n|7s+i*1~m9AN`Qs^bR@zFSL6<{vlu|ox7q|8DvK-3;gMsrZ4_Rl zta+8*nUbItykPOD0l1A&?Gp?D@G8nIP6T!uZee7S_#=^Qc!44e;q-9e;h+PCAx}|Y z3f|W&O~UCmS)jo9XNg#U3-6Jxa?{SO_7)UydLT0v98 zelthGE(vTBMn^m7a%m`$3bsPRD`=Py!)W1mzg_R+xoNHKTfv0U)>Em~F-}3*SP$Va z&a5_&Z0_({S>udTipwGIWM5UKhOw-Qk^4Tpuz}Kh5fLb{gY^~0{KCSt^{j%EUpi>F8WLdA=RNu599C_s^Up{f=vzHD`v5MqvrTkcwW)?K%+-+EIF zYNi(p(Qpa91+WT|Mz|z?eirN(X|1i`$jpG(AFCx!LJ3i`veF$hr?kK#!Q{eu3@!o^ z`I3;=vlT)v4bchG!P@n@3L)N229rV15b9c~3b&G@Xu^zMU{p|yMr+oiznZodC}f+Dg$1}EL-VwC8%g1TL~su zz#g5a5>U=WP|Ws!LApdg68W;wDy?j+sGbcJK};Zoo2HC?}U@4H1{sU+2&u zqbZuvv2BaoN(AC`)WD_@>17#+um)TFXNiQzn@D;lj@8x2m;Otpz7Co_?n0a95&PyL29lr-Xw`Oe4gDVCXFMnbo z(J3ax-=iq{HDdmNcKrGc@#0TJPxIiCXBRG7F#Dd7_k%&3r=rqLJT$)e=|#g$qxZ}K zi6(ujhW+GJw^hTxQ#VowR;61k!*(#BdA z#>*%0UXtJ;%Qx~TIDT3!64xmU?y#}N{w=cLjd=rz|L`O{Pp7YyosaQ-xR8ykXDd@m zX*y*dS&#-j#Q-JdXccfEoy@XiAqda0Af=o7j|YHaTiT%mPb>Ei^d6@!hs8QQe)$*q zwFGcy#i2IPD?W^3)uLpN!BQW72fX3HtV0?gEIiP3EaHVFF%L!*g_l<$(Ok-!$v2eWC z!RG^dJ7>#QtDLhdLn{N`;Y2tta4ybSU!Pb6QL{WxQVf`OX-aDrSZz}?o3GZHEOw8( zcyZFOpucC-Vy1O8p|W0SwE3w^O}x29O}ls+%8CiAGVU}(YjPDQuFz2ins+f>R^DjV zWCn_mC6?xBqcFF9bc9D0`wJeoV1Zi5(0fy}hISZ1V@~s4u-BNWRI@)gURhV?gtTEA zT|87VzQ?!A{CEDy1f}b-K>tUef|<5L)l1Bb|OV zjLw&}gZo@udzpo`s{Ngju%2ctRG{XgDoQ3UbwH_hcDO zrFh*RPo9eB6A9o#(?h_F!R<|FXdcrzS<>mySL(C@qu%Q<_;^a)+t#`*9%zi&r)9Jn z%4=5cH0U%GM7bCkTI+Xt!@l;)jvZ@g?<~JLX{o-t5Sl+XQx_3D+j_#y4t{n-y+E^b zc|}5K&fD^OL2CntQMKx}Kjs%j&UOVu`GNSv3nWNL@J}$M?fDiVo?N{Hl+{(&0FG|V|UG1inBrhp?6V3}vBLX}xJ} zan6#9|HK)%Qn+${+@Ksv8?{!JR0h0}k%~&lqObel@~WkY(w!X+Nwl#Rf{?mYsCFzI zC4clG+0|TH&eCo{%Ir|KT1O2kzq!KSyxr#r%-mUh@>L&Yab#9GSO#2fj2w{w|R{Wi@y4pa;Yx)GpiaT&BM!loHQISblFNYeQfu#k>Ne7?}^P^_T2`zr)KA}n`)9q&QVEkxNczod0aT>Fi}KtnomL8?8eJ#LeQtXw|$5qmgZ2dTw3UrlGd^ zhh~Vs8;$y|?ydx1{lK~N+lDrEHO@FBcHXc)IYY%5wCmUZJ|$R)%tolIrDLiW4A;lN znw}STf$DKSq*6RzajoJ`#dC_^D*moGfz)MdI+SiOLsnXu3gF_(@{u&(#iF#A1@y<{ zL`oSU6Sb*NdABI}0|}sJgjSfRO9V*L8>)fC$SfZy33M#WFnEnXgO9(U^=UE#n^iCn zU{OLMTypX?X}xhTFbL?WTy?#Ovzg)!$~$@J3E~~4=c@1>Wy%IdO%SUT_(mX@_<2(X zN=(@3%NCv`{+7oIql~(+e|`zZ8CM!iYwmRc0UW6wbWoXg*B!nBO``6mB}I9$ye*B8V~7O7>i+$@L$9ZVr|!G&1LfDMHgBCpo4TsP7}i^v zEUPjhw75BW_VkJGq2Y{c0r=I1Oi5cspQ`?%p1w;|6Kq!N<>w1FcZN0k_36XC$5(r` zl>N#r`z_K z)93ZMW>q$?nO^k;iX8pN*^!dDxV>rnPufcNS)K0K{0r0@E}e%?KHk~lseP)tR8;ph zKs1lmG*)Ms?k_z)Gcpt6wJAoa@dpMS%F#VHy>@fw`L*vTAXw3$aSfZuOj6OR>ICAB<`R|>LGC3#A(hxQhcsB36;;RIG;orlBHZf zGHhTHBJ(c*W(;Wn1{QFV$6yR{^%Q>xe{|rT0FH9LZQKxA5`vVZ^VXA>71Q^RCy9i@bz+|oL-vU{aXi_7vce0?Hc zbGFe2d-Wir;cTW$qJ?-JxO@g1^0A=C6SuQ!?1o%F8k>G<4=-pN3NfHC`k}cU9uw)@Owic-|ypgS2W=F1^P7RklW-8IUw_YC}cK;oIGQW z;47{#X2u|5MQP_Ll@Y#I_uu$>#?#@tH4qE04%%!k&dc7vXk((zJ+~zSwr^dogHXX6 z7Y>aWcw5B^bkBQlytsVzPkw$_{HJ)8+hQ6yRM0q~IA2FzqgFLMJS#I-sB<|;rH)A^ z?Ovx9R0XWr9Wo~>X&pq46^_)$l93y;ETc2V1&DynfLD-lLW; zOa7{|S^ErIlVo9s^nj93@a_E)3gi8{j8F1|utWgiW3u8@03|Wzad-sfd_zDQC=!qn z&w@laf99zqs^HRv7NL$0i^| zcv7O$=xF1-oLvwqvb`G0taN~n4^)K9&?d&XeBX$khgf-yiplgX=7J^ZLp);K0YvOQP4&(v^`0v3?NLqX3yF0E7nxA zF@js>pOLBP2Bpnd_4KDNhOkH-?J`5Ey#4CvImy1v-G?~AK(i_2b5Nf?tgKrN*A?mvhod5+b zLBGg?DbW!KgAx%6;^Dd|M!y*(eTqhH@szi!(x5W%T9XPSV937aR6bn{hJ1+}>vshb zlYb5f=77s#34P(W3OP{os3F2sUbPY3fJHLt48MdEiY zak7xA&1UNCvd5!ogC%eoCqzCU}gnlbt8rCHXE zy(?RtIxggCyJF^`0MkP^J9SryC)b;+gg}dbGpSL#Y~Hw~iKJ(p6#p3z2&Go1^Inu1 zxz4YHP#>sgsJ6|!e{*o9Ye6+EhQZ#y8O<<*=96}Ci~qIJK*RDmgBl+Mn$&K+Ij+;? zpUBRh(J*HnafPrhKnZ#^zrM8mqNS66w&`@f0Eq@~2ju}6-+cmvQa81raLkf&n++`G zHn?;uP28jl>*7mmpdyPo4*Px`Yk}7WHA(iYrwOGinCaAoct^-(5qP6o$JE*;93f4$ zL4YE(gf~d;ce`w6w^I;YZY%G%*~F_II*NcY!ZXbZ6>1Te)1OkCV8_s_7=^UHpsNGJ z0jLMS?3hwxcXo{E@*g218E_d$Zef(L8PZY6$~ACqmwe#>Yq1OwIo^?R0V*A7{k} zTzVrV6e-H7(HJi@!?!C`+3z46jH9K31}%3rKFaAs$FPP-jVUlE@!SmYgjX3o9W>#K(6=rDc-oAgqo| zvZus!Jl}#~oZ?vW0zi|pyd!P;@VdYykd~NdQ?3h+i#xwJa2IQHE_vb@^6fbbP=aOA!t3!WuUA{6{ z?-Ac?6F3eWU&F1Q`iC1Jyj1Bm3x%`mqWw{`@tH%!!zt73PrtAkxoPO$cC@u#*!cDj zqgL{+@qxQOj$ZoyynOdYk2wWt)~BQ>oG+Pgf z?;W{K{LZZm0oho>`e{WTs#Gs3wjb$!7-deb)5Weu-aCHt1E0mH&#t}pt4r?`KNi1) zYMfBa$=rh~o;R+4X}#AsC#x%CZm4XnN~ovzn|O?UEw zx%KsPq4;|}zM#{0fuRhtr&} zs6#Z`eNpqPO`0^)-jqEr1$E|XD^su#40+nx#3pJz2BCf6rd z&tKHDG9bRj*-KvkwyJod^*~r-Y`gpDU{wVZP)~3g zUvsk$AL_K?rxX-}RGVO$0)9QjWnC;@!rV$NgA|qr6>lg$l+-6$c_yrl_Rsoe7X{lraVfWLZAM55cXZ z-cENV$-3B42FR`SaH-jULA8wY81+I>3KoCEQvtQ`nBWMq9l)og7cTcMUBq$OfE)6l zEk@k2jN@lYuVv)38@Cmap7!)3d)^ol4hD+fs;TOD)5#i{3rQ7b{FJWX_%xqkbsW zK%4vgmWb9?|IW)sV};S`TFi6Rdk=sIzH%e9*OhkazO_3<-heTBFN6XYbO6 z-0JbZAN>kyy##oa9@*dXg2l5K7P~9%cl8#`MT?7wJ1bAp?Be#eB^s9RD%E!J#547N ze^q<6;CraTQ2`Nx$+n3l7@Lf&zAl}v(=%q`oYUu-I*^VgUjg)rIP4Kht;>hPx#=Fg zQxlrz)#*5E%3jHVVM?hs@>`D_-p2E4CCNh`q$*n#r`3C>nRw^;SCBS2H{=>;f zFQpa-8l0V@L^J7h>69yn8kh9E9bVN=5o(R*@tTp;25^d7G(^@8H5Y5C-mv$ZuXs9K zR|k=ewYAF4SrD45{0{Vocq!@hRYy*I*;K|BoxgkpSp@$C@CWt6e2~qXCN2}lshg;u zD5@20ih1A}bOE?M-m7>NxDoh^O3*H4WRvtJzz@nbRlz11!2s#Og)9Kl;eYT!G0AXR zM5niPWmLjwhH*mPT4By6UjQJ*a0~U$I1(pX$YLvrRGfM0M%ZRfVJ-BE@09OA7$-m` zq0-@Sq@%&3%GLHJwhesu*I~V-r#cdGLl&BE8X38vweH}W zp$n%^&LkW|^$@hG3QU}I&C0azN>A5ys{$c3A!lG+aH*pyJbNoWkWLCqR8#4|&rU}QtpB))$tFmeN ztUbMcUeDRJ#PYeT2am3Z3YL9G+sJ$KaYD(pXFXXpuFqQUJpOpp6-ku|R8bCK1qKad5J-ps zD+6CxRw(<6%U~qnmBu6nq){Mxc>QqagbW#yk^{g2?vffxEdjJ-@;G${x5-o&LmjA$ zad+Py$zXSy4#K_gBL*3)p@V11+GXb2FV1cnCJCLs(UGfKoG$f3Oqs^~w`-7iqj)Nv z)<6+av6E5?dIN`kWMsh4TJ0-rt?69ZHOsLBeZiTiOKPi|YIyvnsYfc?XKFF(}C^e^{ZWVe4;$@>$-95&YOy zMuXKEGirqf1_G~r0WeI}Y3fLO)Wn(#T)ooeGqU6>{@%#k?LWD4V2O#}W^z|H(drZL zG{!8BP4-YnTG#-<_N;gtvy$otuagQzE0}We_5|a$gx@%UQK5+N9|j}I4Fw>K2t}pU zRb-|8-W2_yz&s(~hSE2t516pv9xx$cR*sKP{?(GUidWD2{jOWYPwtsF3oX{03HQ9e zUhpVNJbc1(S2McB@kjAbE5t=D?TC9^dA;6M<#$3b}g^~apKY9D* zvxG#K!zXw*4_{IkD$IRsXyr2(-1o;<1f|x!QdB1_wexCbEZ?(lUq|lU`O|-h{P*-A z#qj%0Tb|ALT>Qf4zg`)x5VG!?I$=CIn42~C+E$0})>F3SU;Qk9kRaN>o|hy;YnB*$ z#HTK;hmGed@e}45%Blz`)+sK;D+_D^@+(NYeGqA2&xw5*@&4X|kstqoc2W|TWXS_> za$p}31pOYqRc0i3w}DX^T8E|9Kq!pTCNu>5NgTxvPU=|8DM?G1pC^N=n?EB z#m$>V>ci_?4_$KEoW{_3~=UYa+b{cgt)c!&$XB8}OW$g*Kq z)?tgaNBhQzPfLg18-42Yc?Vv6_N|}3_UcG{)tX|kt^Q$idgi3qbxrNu&2_`>S8ulm zysYpgc~RPzMdUg8RnQabU-x!NK!|5VaC}XWN>U`kbm=~`Vs~1%tU5!dt zEG;Zr_aqW7Ts?giJs93P7vh~1;#*9Vng`mJ7DczB4{Wr9U``wZsy+^x3YOtgb_jNa zm|e?%*gFACv~t!k*d+=8RYmXx$D9RB9+(!x{|cP@LJ2h!urW7p@{&>5g;LRCR3%gq z0*X>V_(1_f6-`kA32-DZesx?Gcik*Bg;(Zs=_+xic*jTLm9M{y7QJy`pO$b$v>wO5 z)M~RzC7QIKan$4m*Ng8ybX5Gybr77zf$F^XTpKM|P27{zisaSm9NL^gJ`+3K5jwPYYUh!D}nUABbyTLpwfv6^%Vt z@lLGYalgtLr#lIvL&d(SHJsF=(49w4DQ*=-@dlKgQ0+ed7n_+^U!T19t}m;hHq7M@ zd{cT;Jp1OQc9e7Np8Po7szJKO8;Q#NLMYHa3u-U?_Ed=JSHGw5K?R=}IM}CQ+H0Wh zc$=aVlBO#V=1USqi$rAx2>~L7tO~d!CRtF;)kH!@W5}#R!Y3I9DG+`^uGJF%`7)ez5p zsxxS-#9d3kPO_yT9lfFT*9{HC{gby6V-WgcctQN@K(;M|S9aaJ9$nn}ULSgBjo3wKkxgtuU%1v* zi{GGUPkaNFRPy9s#XpEguGFEs(e%5UCXPQxTn8o(O3@`g$9zCNtr&v%@SiFE4!LxE zC+o0fp10Zrh5oEd`iv+;PL45^I*afVp06fRh2>_f}kAW+!U^W3) zWkMo#h^ffwbLd2t1mt}YoJbXW%Y*_84A}2Zz6F0Or-+P1Ua=`eQkcj@7k4FFse$m| zgdOlBz63sX@^io>TKNL8n-3nZ1yVWh}S}D!>4APJ2($=y6^{4 z`}Lq?XSl|V=N;_13*{c1AQ`=xfk27KwN|qMQUxG+{X;*tJ*?E14Fd6u2BX8fMW0(sJ;BGhb-&arm{ETUgFOfD;#$cX=Qk zCGFwCbJ*%dlWIC<(Z{MaV3w>S6#u%vBjMicj8#Q=lZH)&?MK^aigR{oK_90g12!u& zG$*Cr9_#8{J1h1u>e=o-@6{zgM;mtjZgHojbzoJy_o(>RO(;|y4t45i-kq`XtO=st zyk=u~B*l4jwCSw=c)DK|a3)P_%Y__q79NNEIGailZ8}EtR#aG@!_ZoC{ZwU~(`JC=*~^S)IbH zcu3sKTuv=kJO;^}XF!m~mqZ3I_#Wc#WS(3>UPInNK0^M8e2M%e`9Aqq@*6C(mj!h| z14-J&U%`ML$O?uxNOdHERWT1CUJ=ZCF(SzZbm>Tvny}m?F;Xbwkb|ZL8E^%;773KI zg$BS}h4N<>OY-WQ#5#&g);mIn)@}1DE zyp_n7(GDItH8Sh~fUg3EN9C7P6Yyv}kTY;vvP71fVSJM5LAgD~F_{{`1F^D1Ms-PT z0u&n41nGY1Ys%a>Rf7O-1qLd=iTnjKFVhG-H&erq$k^#t@P7Cwc$@`K7+^o#0rLjq zE>ETuI9!A&oqBqC@R%6l^@6`3J_ha~%_+<*w1g+ZXL)L-<_Nzl3^7!I5q7ukRZ!ejAVT%s53%V;nbz+z@>>eyb4~!rHTW`EN(kp!X1D?q!oWhS5@KM4$ zG(2u;Sdi@nDso`3l%4^D72svdKMO?|wLHsH!=3U##-JL6@T1d{XN7_a(y2Uu6zKNx z6Tm=QKER^|=@K4{XX4wn4gy)GAS6cW3hS!BbpnN9;I@${cz)pdg{hUG0=}o78^%^~ zcsE#GxKVn2Jt32Oc#_Pm@H9bB;DmB!_zvU`m=b8SQ&tju1$H(tZuz$;nxgY?jsbY; zo-=R2S#74SGnXfVsobZJXwsVxm^Vk$V3tLD50~k$)c-lynkPuhA!BEDU=6SM-%I= zW+)Z-1N(}qqzzKY=?qazJVdKmht?NShCC*%7P68jLu7+es}4o9A$v87dMG0#S!I)< zDm%^SY&zCn$utBTNM41Sa-0aoo!X{A4sq;K(8t@je5@b`7kpAY z1wXiL5)|UJYPFEGDJhbwiqQt}fT1AaVx3$1)?{Krm?Au3PYLS21|5V( zqo;^U8{|&|?`LH#lGF`1+sE>r0$9*ZwaX30pxMGeq9;OQ*X7(f$Lu{GZf2pT zzxV{dm^l70!n*K;n}Rf{3~B0)>K)y2OU4Yz7RfeshZjulw3^~Ig1#aEVdX|n<4US+ z)(r6|S<#R`l_WBZ;@TNI7t+Lvg?@}@d zz_MlBf|3b{3ILdQ9~D7VLUZ zx@uK5O8|hwNxhaN><$W%Hl5O(FbjSURXK2LwN2}G_NIlfPMPisTPzTz-{ggCH=N!L z4qCZoSz~}Vcnz}_q*U!1koj3Q+)5>!j9P__?Ruq(RWTZ;5NK`XAixNEHYQ2R#?nSQ zH)4kb@sQ+Gr#5PXBg#VDFmb<&;>>(io=OKeCj?4BhDM*!m(@VwO4=1sy3O@ULK6>a zc$Hosi`y$<8_s}3uqo4seohbxhs_Lb`7xo|tp`7f7zLTbf>DBC2z5BfnW2tq+8cA)G9V!`tp z^q1~%h=e+=6wk(LR0ty+?@muFCR8MAfIy6**KG4yTut8U0RbkGq_Hcx03?JWyd4^n z30sGXVZ(Ga?)a#!n%&^%a5X~C2l#Y*eJ0OiI5dvRorHP~hB~ub;bQO%J+N-fq;*(HD$ifkQ ziONAK{DDg<9oq7oWqGH}bgVtP{1sX+KqZz8P^?6(cL9K`QUw^fM zByv8+ln|=TUF#Rngi>wm?H2#uSPDg&s$;X$;b1-%s3?lInjmaMgNYpZe55Mn3zw3? ziLWw&Xl->UkyUyS+UUy%0Jg8Zn2VB?E*F|wRl4j7i$CYHKDNC0^ce@L#L65o`3m8BH>Aj`<1 zTX-)gX?DwUlq@Ys3ZT<_rziy0<|R7;aA8UDRnEzR8{@nzSm?D#$&RD4+yr(6Y+!)F zfOG>&BzSt6uR=0~Z%@Lfc`a~t)tM)l4}UwNZ+SA_#7{|VQPotZcm|*i7Rw_CQcBh@R(-t|l)64cHX=U?gkH0CtA2y_V zn`&uP`O%-ySrFCDP=`_Sf%{P8nr{vk2ewYD+tm{=d-YHxI?%AEYV_QJ;!WZsomq9l zWUf%Ad&GN=idUWg-l~H?*b$hYbt>N(q_yURnlnJ60%Pd@p|%Y5dy=9Zwq$pMmNxIZ z@If--Za6f3$M=tC9D#((6RmKdC&7`$<&Gp=%3ECFpu?H)+r?duc*yCDdF>N>p{R=* z;MK+Zj1U$?)4hTGT;gg%3#3Es4mx9GfP(5JwHa5Pk#`!bQR6oc`2C1cRR++#Dc5j@LRfHqK;aZApL4f{hC<`{ez$ zlq#E3raC|RGvf|WWwpIjC=hRKdtb&Vo9$fs>&vO?pWOGijkk|lnf849#jOY z6oNI*$Gx>~l7IlN2=yFEkz5|fpt1CCVo-#~UFWWLJ!n>IX%=UIgsM%Ju*K+M)cSy+ zh3w_;iLWu!sP99S?^ZC}?FQefi=b#D&RFs)*l>QOco%E?B>5GGQsDe}xZXSuvMk0= zjD5J=WFGOsd^Vr8WGz8nYENdtM-XrkgQGBI8kUg|%b{?0ij>XDSVEU@9q&x=pL~3(o1l3|+zB@VPgXEX-=P1%YAT}7j~4pn)07#pZ;K9~(!ve7)CGgQNj zTIije&c_4e$+@j3W?4KrV2l=*PDePDNO9eDSKV*}QV`@5!(w7=c75F#F_B$A+n%uL z%#2YOPxr6C!Dq0x{&`yyz2VBkfsTMDlrrfwAXVu!4$ic5pnkQl?(DhZD)9j{a^eSQ zM0`NGb#^A)6EGPZ`r4VE)WDUsI*wmBaZ}t~zp;O)!Q?jT^_gmKVQ4+%|D<#z6n^}I zGt8-L82h}T;aGkBc9;KHz=2*<+qh+di%>cy-?q5j7GT2QKWARX+0@9D2kC-HL1oE( z`;Lk)OunVmsq!bg$tO>?ztlK3+@QW}litL+!^Lxl7k;!dn(SJdnT%heBy?6Ctu`?= zp8iOGe8~l~x09~XuF2aUnv4^_en|D*v2-t?>Mwa4E8M(&+HGwUUqInq+mcj$KM2>t zyt=MidX<#8Lu)i@?{9+Ke}@ML4~%N6-0mvPDERIXr=DiQYAvYpbD&M0h3oN#DRl&| zIIJHIECbIY4=kPx8#vTf0UH<4q@7-DX&-ZXyC;RRh19YdLSA)QK`Mg{z!s%C=J66W z4oBhq={MbX{pc4Dw`^a#4Fa9VyARC2|DR8bvnDzjH0RMSAuOE9z{80RiT;kL+J04vEITTerSKVScc~ngsD!$vFbp-4>UF|J9tG8XFSE-ro zxf>aWa%JVQpn341MyuA4V5(7{nqd5@FBMgwWCxpv1-y&#G|6ZomE-_mhRv$9fs}T_ z(h=Tff;kiy3zYVlGBQa{+8CAO?Im;q$9RsVtumfBqebOWA}zz_nVyEuabOUbS@2oUdjajK>jZ{2P5}z4k2k4bh}B1J5r+8+_CKW zNwj-i$vIY0nWc)!PJi=3H2l^pNZXvwXV`$zvfaXUWzA-vj$e^9`)4-<&;{c6#E(b3 z29pjPmh~D#TM;?v%zID`rI{ckDj*|2?z+$+qW7RWlyL0r&r)D-xZ$(jrrdJ zpjxq*fD2{IuZhc*{0A(-q`+3SJcH6qOp%r{msH<#SbRjRcx1)&D(lQZ$@`bP<~+XB z#X+uo>(Q<|J|OO1vS&>EaC>B0n07}Zn;zNKaYb9uU+>#?*Ps9L%mwa%xh)szx<0dh zYwKOeHg=r`WX|l{w~{lC^u#(ShE&@*ro~e_HTT>fk6o(2W*hhxy#qO)XQ*2h{g8@s zEtIjmP;r&wR&bMe7N{&GKv9#T02YxV6+Ny0DU(criV9MLEN&rz0HOMJy=54|*aRy} zVdKev^rn<@<6=00H^;$P&<7>?_h6X8b<+VZ!vB3!L{?$oTmJhuu+jX-1Bk<4elc)a zFnDn2Z+{y+=o)!pc_CP7cVv9|Ij;_6=OsK>jUNAI_()J0ylUiKm5KjQ{P%&*{SDJK zZC-B&6-}&ffAQTPubI8)$#Yznp%2dPeC3sn9Uj-&ov*$M-{OwdYhr4b#X9y_3=;2` zmR_I#<;<%_)Rb||+Sj~VANlD%@rtha&Ykftlc6VZ&N+!59zCvMO@63Z=AKwl{)z8S z?B1Q|HvZwi&Q+bkz=6T9zaBUc^dB60Hs2Akx!ppfrB=heGjdf3MttJL$W>l#<+{>a z;tk(hm|j}b#|#Wok@$l2CD;7&>l-fn(b2ij4xUW5Z}+-(bpGt+_8lJg*_|)lchQU2 zt*mcZ^yV|HMrjRQx7l-KcyhmXrn#lK+Ra}6K;)5Fm(eH$MAFMD2Gbyq|@*SOO2WHhWg_as(Fu`==jOfcI0%~}G@CvXc z0TCz+gG}dfF}X$Q^)Ib;l~&g_4Tv45?nI&Q?0I+L+z*x@JKDKTeC>}zcV!ahh&3_i z`H??~FaGeR5-?Taiw z@~by4x?BA6-Zh2#I(1_!(EV-qJbU)zh>Dav9?j}n@teo~(R$17rt`dlrL$P@*1Ol~ z8Uq$*Tcw-D9x;!dTEc7w>8cG3*?S>_q@2=NCe<=w!s`UJx$#QC>=uR7Bpjo(gn&jc zQ5CR(TK1YL6im{F0$Ylp3)$ZR2wT~KX38HMuPxYtgm0r@@C~qXK?F~evc=KcSoRaq zyuO=ic6pki0JNz1HFCYO>ZJz^j?nNmo1D27(_;RvaIzy;HF(wFwZD28t$6-s%`vp^ zzHzffL)TTEU+5PMd~WsLC5h4WDFs<{TjP)}PnB}$A2afxSE<){Y*w|{r8Lrr1J6~6 zlVHIHY4f)(Ci2j$nTTO@nkk{zytodt(dF(bD3NRQ!2!i+T`W`T z>{VX@HO!M>i>-sD>jj(R9K|BYNxo6B18@ye_(8D>Jo(sv2`?VTA_<8wUsYJBLaykS z^zyhcn;-BIr^58(6*>L=AEzL|NX;O%D{c+0DALcc?8PD^|3h#QKtUycF?;_lP8_`G zN6TC4o}PSI`~da*W9>UvSdB@I#_zI@Zn$&NKhe+%0{#2%2sARBSxGA(1Q0dd_$Qtz)mSG@K69cC6sfF@^;k*j=FD+m&A5_eS!t!k|c}bML^}nfU8029DXI? zGSncIsxCk$(ha5v=m&@}vlp!5|K4ge$H=iNb#Bp>PaY5_zD9jt9Qf!vM&tc|y7XVD z@8qOltD1<}lNM!tg+-XC4LDUF&YzeUYV(o3#)*wIsEh^)wNs@f5+Gxptgk!2RXJe} z@h3&HHA-I3JG7HcuC(okly>6J99!N0!WAgEB@i|-CP-#WO#UU%79cKzELUpgeDOG$ z6L*OZtT7uyr$SYs0D4r~| zDsN{sCpd%hkgq#5@u=FN;?T`}&_uong!!Za5=?D>JBPe`rOIz7H{ z^^4n{T^R^lS+i1;E)rJQ4o!mBh0`X0!qf&H4pU1O2A;J8gr6%EH-ZA_7>GZwD&A3i ztoQ=Ld1+(@%YFr@f!e`fIS;LbT;1oP1L#_GCwde;iC#j#MDL@&A~02AJcJd&f3V0Q ziB(}GNEsc0SHU_<98b#GCQH)tIR}~Kj0y<-h30t60Jte_I55gd@+7`a6%^no02Ud{ zG|SryF!G!}4*ic~!x`7lZ2!OdQT~tiku$EJdGDFWXFlu9e$H$oy>fZ{XSO->_{{ca zUO)3V^gqUPW}7qnIdg3P>-w2(&g|#R>t`M_d%qC>b+Bt+{dD!rjJjsS+u{rF9_qRd zQ8xe)6wGnP_`Q!W0z8}iEA56VqSWK!S4kecA_H$f3T^^MH@*CpvkJdNf14I|I>TOp zn_*N1Z1xc0X>vHiVTUt}R-hOE(zEF1$(<306Fh*Oq3`3S_>YK2q(+X&~KWz&&2yvjo`j=mm8qt2vqV7fB*mdHtC=GO+1WSOr7Cx<@Bi@Cf}Lr{)wpus;8%p z#KTiZX#LdDiPotj<-pX@$(2)`d^^TT@UoD{jW$lb@6E0X_c!iczU#lg$#(Jb zC&ocUo~BwrkOv-oGEXVQoeALG{xXG zG7PJQXL9nH@-M{m<)$ZR{Ko`GoCGH=TG(;Z)II8Q3*v?H6>(d+UwH|`4uUN=d9&OC zLx;T5^7sF_W9m*Ap#(saH0~i3lJA0VG*TX^xD`JPPL$d5L&P2BoA7~jgX@2e@C>dP zQ=~xSQV%7r`oVW^A$aX=fRZXeGO-x=El-pbC^Ego;y+f!Sdhf_u)kSUJ_LdahWser zHzMf+Gf8{`UIbX79<#gFkqrF9RI$y2mg_TzL5#7q9r; z#aW#IfvRlu=`^odHaTo|0B2rK^e6re> zb&xYzPBr;TxY?gL`Do43s(IHyq|*R;T~)X}yB31?UN|)m3RiDdL?B__FfhaY5H9hc z;zfCVzGZ1=$Su+uNh_qVkf7YbG(8%Swp~R@sUvUJPpgciQ!JKF>0YsO#OZh*aLWJ% z(`k1!_zixc9i% zD`RwDmo*pXc7Wc+6n~y8-vWhkHIkjX%@|4Z4`wz-#*Es0N3P&aH&p_l8~lZku0C5O8qS7Czm zQ8c7^U7uelnkYmEZHkw_2Br{UH^}= z?|_f0%Kp!N{Y`n(=S}Y=nPk#uCOsh)(kV%RPyz%92|Y*?=|~4rL=Z*Af?`EnJ9c+< z?Y*sQS=V)4l-1QmAoKX2`zB%C-{1f5^ZD~3Gq2wF=H7G9Iro(BAwx_jL5#g^-Sh`g z|7WMs)JRT+!3aeFH?>H2{vcf~#LPOihF0opsN-FU@@BPB<1?svY)CglP(c28^u0RU zp$=@B9IIMs*IOV){8>LZKME`!#>=Qna(JjB6$IEgP=H1yXu%@r6+Ee=$xMZGzH+-# zD9uxFMPjib`8D8g(KY$;=)%g@yu7xYGA}IhUz7gCce92x2V~%O1E9~1Kp(ivhqD(z zae>)9Vk5+^EIb`ykg&{ojh;UAHa4G&WNtW|=8S zUxaW5zpvI;=by7N!ep!kEGDyowv_ogR}*h#teot0EOElj^bCwC8z-oi`skakQ4qUk zbnmHIC4F3-OO(NiQ!Mzy+~oN!cFL&LietQE)5m43ykYGsqiXrsWFORy)q`bo=fu*b zMMkSP*y!c5GSbHaf@`V!$5o{|z!l4e zLZUA3uDnSry2pA9fi!E{vlq+wxqxovg^2s89L!q@Jh%x~CS73jn+1!=MUWnKIamX> zLVn>jX@}BoO1nMnZYVzp1;6oRf+4{fxp)v@LgGOjF_V+5Kb$Ba;ZJ0dh<6O~wn5&e z<+BLR`NOHRD0M`>hL%5rzq)t$nqMDJU5_6krk)}nfY@yO#MILf-t?#LC11REebBe( z`1HHaOgM1g;nu41XvR3AqN?TaiEGE7K9La{h{XoJC@h38nug87!sYU3RXh&c=Wi<< zfDL9ve>~nFPws|O@i_TttUs2!D7gXlVzHr5SmD>v&qzjK+@p|2uH&;9${Zx~95USvvjLpe|P_8E*M>LT?r|=}Ahn_t&um9k| z{@FJY=fBOVKG6A)^p7=*23I20gOjhX%l_e-2SLUg6{hzPBb|E}D$<&zA0F#DbYP(W zz`;3CYe@iyqLo#qRe)fd2TW@(7`RV>iyv(D!(@&5Q;JIgalj?_ye>Kg*_4v zSn3vgC?Kf4ltQaEGt>!^z-1+=`4gdr?aq8H#ENK`GA%GsTVDk zCXIj~N37mENhK8fsTT#SVD%D!)GR>MOOu8|8XdY<%V#g{j9XME&kONg8QC^dqsn3o zs2|ht+6jc!;_J3p{C;Y(j%U-YR(kRzkHA&|bns0VmqTTpnQ3OA?ix9T3+Q&(l7>kb zMg`nAX}Az{YPeF)EBPWA6o?TSmRdfb8!V{sEm#PqVma^_f;j*t0&VfcH{ClJsQ1?^ zI@N)&^ydC2q`yeDd63bRe&3=ZpBU~N{|tTnt^N$3ZPbo;59f#;cQ84_>dS0dvu{?( z9VL-;WIp&7(lW&vH!Xvrdr(5eI_e;*9=!0gS!J{39)ECd+f!Cg_OicExbp0UNA6x& zd#Bx9fDDJXovYUKZu5|s*|TM|-Qg(TBHcF#o}llge{$cl9bmPZ1n#~i;CbH$(%>eL z1fZkCuO68S=9EYx!jC-xG`wOAHW|n@fScFa)-i!U)S`EkDP~AxNqN;4!!!ibnxxBQQzyfGP9#< ze1@1OT5We--u?00f9p}Rp@K5N7;B3*9=-7Wg1f(PJK_b3(LKrR>U0L^Cd{)o5sWt@Gm>6LWLZyGq4E zb9DOOuSP%73Y$JB|98^cclSRMhafT4F%&+IlEVwJfwDw?Jl&bT@YdOdT46@mX#YZQ zo#!+LDzC8QE)wK)5QZC=1?f}Ir&S^j={k?XCbW8%2n02eBrndkSc`^>H5B83A= z&U$m)0hsg?RlkFnI*v2=Ged4D5!M<}^7i=X%&cu=*yh}t=1}9RP-9R~w${d@TP7}x zoAlWizRUNTlnSMnwm^yR+PK3zv!OmXIWw!lmVlC$oH1JN^vg|m_K2n5`gsq}!ba_#%tYL>k$5OZVb>-T%04%20UuG8dKB(8IfQ4Pdj!&7n#TOux>bCI*ttF;C# z`jWsridX7HtYIbzN!hKpl2I2=hp23h~%GTu= zZHZD_FkI&P*jpF#_nQoWj;$+e-Yh|epeEC#$tiQiwgSF|lUJ))2UoauZd12HDU?l0 z4<=X(NV)qjYPeOfinD?}rVhA0RL)7oEa-vCKt4eY0+{&Nh%GZ{OlYZb$AgIp5nlp| z9o`-Wqe=rzXME9rD2l=aKt7pwF|DIw%%Q%}UbNu#&ycfx?a3E9X5X1b-T0SVYgfkS zHSei+wMXj5-g#q(x%b`Pmy!K^i%%Tuux}u=uit;*LFviA(#BU;&G@h{npy1&1P9Qf z3B8+$8y)v_)U=?RZ>#+3#|y7cJhEeEf!SUqCiCBRXq`rpD=iv-=VcLPqU!T|iEI5@ z?dUZpwyZb^RddQ_C-2_X+TeE8TeWJmY-Lv(K|CmZ&Ms&5X*vkuNDB<-jRMqNzFBY9 zn?SM4hYUReCM5#_-HeG{EbLigUs%8I)7hIZyonUAKa3Q~`>SkO^*vYR zhNQ=@L)KT`M6aNG(W1|jfBrza|NDXa2p_to_%-P}>6I5I183MEo#pRfE`X#3 zXbKq?n@&wI_>`F_jtIqKkAhLbb1xiD%~?6D^0y`%u<&8lfVp{!o8Y0rGj>>q!NWw2 z#4E`eHZl$|%zPKFXJhgL=9nyUff$C>*YKsc;?r|W2d{5fcwOs`>*D@ef6wD%)xynE zS3{z<1$}`!N~6v?FVuf{WMRcvjWxQYdFA7lh|7grM!R$hi?JqPWm(!3 zv2?F;)kM}EXV#C)oe*;dLYpSFU(;b{D5F8=;KqVRHL+_(9Y9bsExjWkz!!{wwLzeqXKxz{D(;(`+1 zR`3jpy#%!=_E)ovQIoxzz7^4J0OF-JXU@x*MPJo-#mVPxADXB?`^v!YdajtM)uR?pBx@VJ!T3MVIv5&Cf~SZ0mD` zN(qnE0e;^+t6;1N0v4)^>Ln;qFk1m@U1j7oTAd9|BTQg{BeMfwH*}+M1OiE;qcT(h zkV0^=*WOpV8R9HYN&mjLqO^;cM0|~UlYbgQ(qo(c0MX>*(Q`+-4@}P2YRwlq^G2C! zpXzm0c(WFu`nL`v)!aLEw;btvtppYl1|_5SEGRzsQJo+ivpCguUxhJXprY5=(2pDM zy!VXT!4kX8UTs~TSZULFG|PbIwG8)1QmGKMg_fw*6oMDi(j@sNp%%byG{3{C1qu2l zlp7LsMy@}jN~fbq{g_!}%Rhl^@e+yM)_PM;c26a_n!aKxqBq}66rmT8?o8WbuiBl> z{QM=K2MuTQtoQ0P@l(0o}Tc~6iRtX$A(cdKCDsneLWS1vxo zPo>X;KRh2|dFMdw!MiZgqz1k~l-EbYS`g%)!4QVQj3=|6N&*795@I3|#uoh4aQg9( z!7ziq3UpFQU|JEy1YS*>j2Z%mkcC-Rgq5}5Bij$n1#kj*9o&PT8NgM!<>}~0MwoJ; z5yWF)@R%Y(Cdja>B!IAhJBk{TCSKg2FbGyc`=nA3l#i<|8ohes3ly!j&;z$zJ+}3# z9)A0Fby7zY7A>Sj{C*{^;|)5(@syQRDjz1Hc&@PXS%NlLnVuW2=^DLvHo0ZDftc$E zOXsBY3lsKpJU>8EhC3!pP^!!}|4Jx;qhQHTq&De@_XHH;7tOH8+O?+8vR$Wz!Oi&y0ftk+L8?y%XS4a0#ml3_Ex)ZAwQfd`dNp9HJxcakI zDgx4y`Q$ryOBWRsq2-jsv=ftyFIx@EY$fGISMLPzH6+#ZS82)PDEpo3w7 zj+QHzd%+(O^B z`l$__YgSIaux`%f^i%nf3+syWrH4`D0JR~K4^SL^(ntI=>;#CdxDlY_FQwb#@eB=X zAow+~-kH*uF-N(?mS9KLmPdFiX3ek;0%aL2%_*UWNdX$d&Ie35!=m#NtC9ERr4tsZ z(C|o;aN96p$bVm+#zV(*qUVioTl^<+|)F`!<-T1dPt?OD%K8;W34&L2#>eVcE-&Kc}F7)Y) zCD~J&4qj2eHPU7g2BUxr|(A<+Yzo+w*8q=;q~9uPX_axXR|Vy#GmI zu@lk@7rshKQy!Gwy7{t0bDwUjZOrzVODuJBYwK3NJm<*DtEIbcdGV7CAB~2mYj1mJ z$v>sjU!%3JJpq1>DbM!;>Q9;HnDn>#EhfPgDLQv^>C7CH%Q64yg|pA>Bb6FUmUF^~ zdlR!JD1rTC{&=*dAF>a!AKzl$+Kb$dK>3EoWjixnMOuyccHb8p{dGR5lEWGtuBZ3( z-7rR*=jvFLv+~Y4J)P4MZ!RwF(%W9MIx=?wjsyQMVNC@B!61as z0uKX{8d=f>f)I`}LJPPh_yc@Nd2b+H6s&u#8GXCuL8~ zYRzC6Dgx?Rj$JycJ4tkGscS zHwD~R^{Y_!K>6B>X%`5kyCD>62{IH6yGyze3h}h3#erp9mG(s1>k!%VCBO>EH9SC< z2tdI~}NSiTdTPweYE)OepetrHlk0)^tAVe600cTSP!tkmWNKeoV<;Z{n#!xQ8 z5V9CsARNK+SrBBYr89Vj@=nVgcNqYDIr4R*AJ{Um_rUnE zcPz^%dB^%zlqF_DC6{?9mT1{0}9?~-usBE*McPPvXZt|q;v1`p`%3Mat zv}H_$j3q)rP9RCMn74J|x-z=3JxCi>LO{iXMcO= zAct1tQHKmry+f5b1U%Lvcy#Di0-3FXO`E1ldsn)ip8(5CF9bjorPZdjL!8+wP$Xh3 zgp*E3b3krl1qXOcAPh1r_FqDD7-S*{ftJI8oiVShKP5t8ukP1)(b{2xz{Ef>K(Qdt zVi*u3oJio5J*dGZ6BJ-R05pRM8Wu$`IboH{1kwx~Xh4yvLZ?`YK~fT?KFs*!T}+wy z251BbkA(&SLBQ{4Mcfu_$Z9^{@ua-qV zC|ab~KDbl*XW`Idt8cT_=d-d-F|cHfMf@d58Ppn-|3P$I_}paj9C5?;jm<-gnfL#B z+pYin?v7jj_1&>s|M`zQk9~K|WnYiN#&2sp{OZCI-bA5>1Is zD;UbmCpuR3-Qbu?G7Q;D5Uu!os^iAFt6NbCMOzz!p$gM1icxGqXp2HI%TylrbUEqd zSdyn#@x+#`1izAoT5Y=rD4M_hcKo5!vyuyO&%EFC_2%M%{AgBt^nFw)sArseN3h;9R9m$oIf~l+-RV1T zKXc~xp$q?njqiUrN}Qg-22`pnZNyHq14=*+_+YjsTK15ezB7~4nOs(8>=}zfg zvrc+jYwCIazQ@(f%e+QWc6Qm3H-6WPMxi>B7G>+q51;}Ab&S)Kkfp!Bkv=Fa4a1uHVm#H|@&M}ys0-}2SATgr<>PQNYNlc!5Ji5r5e zOOyefW&+|gZl&6$7St94@O=_g%SrrtdIi)WH9)XM8VKstnh5iFAR5}MOR;j`lmB6t zEDJC-|C3$fC>Y?wBYZNoQBc6w;mLaTMllc&x%6L*?CD?b0W^ZW3i8Me40jbEFcHVr zs#MQDOS7tHdF~*?Kd0vC=blrm*U~F|)(aEy?bMxC-|`EulhY`Q<=G#91n?@ba!Ws} z$E#y3-J#wCHOQDf>JFNX!66`-)X!Qe|EwlRM8Njr-}-4FcQD1Env7DQ#5B5lRRXZZ&h1^oD% zq4@kKX@3R$Hws8I4>jQ#gr%xH3FOa_P1&XnVjjn_VG|gLH<(cPzsxa)MI3aRUL2k} za>5K2z0hYMr0HS}xbu*o^$;BA_<9~Ir*-e_U4#PjNrd_EL#WoKFHwfkVKCckV^0j9)e{*%Z zui864`AIR#=~-JT+L)3SDABC7R4PVK?yOsKD*xI6TejJz44QZq`Z8A|#E$p&ot$CJ zwtJ-`5L$wZnOaL-{)WcYQ(Gcs%a8Y7)8x{TDwb7=IX07*pg0yL14Xm;5{D+2hlKs} zGmF-^cV$SapHYKZ6K!^__91eoec>sY#tGiqJVwK14 z8j5?A)Ms68S6)v-8_@EV0fJ<>62G~(w0L~8^p|*TD93Cj*QxuKXFT$Bp;EM#gt7tj z2QK_GPOZciSg)4u@FU8^eWrkv zZ-D4g{0<5j2waC!=Wq-NI-1pUmV`Zhv;}~klwh8ESh|&8!nQ%^&km@jEYo%hEdb98 zV8>X)g!uuwBA#N%0NFG02_wMZq~IMZ|w&7xhQ>9j(ewgT!vmC#yq#7_{~CCEN+7fQQK1qzDv^w)?c-`QqBzN_8QQd3;}UfH(2)$H|3vL7X=5n2-e%KMR1ILrv2J@X4u<+yrGN2Ln2!OwItq&K z{It~&{F8>{zwQkE4M zG^_*4+CXZOfZ)NBCN(CsY{ZaT11nfUHe|qq<$K7_lWmdW(sU*-Tw0QzY(bby${1xH9I3w2w4#pL@@UKG%}sE$^Z-l?B60H!G_ z-KNX7ExI+?r#2YW7ueL7;BNo9JwDgmR^ql!h;SnO?dqY_F~cK{X+ z2H@&OjbQkzQNWKAjDRBhmy1<=9diW0uxCOL?lquZJqz|J&>_J5tRaCm<;jAzhOhz1 z9_SppM4l5c2teIfzRT8VSqm_O! zQUekX260<3N3_3n0!Ce4^25x7=| z%GnYclA3Jpijt){liu0~)tquHmg#lqU-_j49k~-Sx@!w7JPue=0Z*~rASE}=<5em( zV2M!-v10Bw_GW;ijn^y8%8vaM$uSkpU0R>oX0O~Q?e3P&p+`1$*486Hup`V( zWu+}grTi|4R_bdd7+#y5)p0K|%SuAv6{Xi?s8u8=0A_=v#D{2FuSr(`RX`1q>&&3y z@B~M=b#^aVh;GpH4rCE+fKvuV${B=oPO_ZNFLZm@!u)U$kqDL~yQDSzVb%c_@OFs$ z1|kE(An~G%fFT8thT-_4xDBH=QZx z5)6>g8lKiz+uZc5Bn8!{Oc`Zb_O|lb^6S*WZpBs`^#RVT2s4!@7B($C?JzB zM+>U+HjI{*-n`A*9IMc|b?(Exb9Z)D1t3GW=y~bbi+?~<-v0=#dP7=bT~<^g@K9u= zv_!)xtGCCjYC$imFL#)zerwp$^$RYv$gq{aCv5rDvsEU-}bT@;vbY1GvH@ z(%*Y;*yvZ7Oi_WML>HP~tK?N+N4`N?#qR{aeKXj0bv4UJiiv4%#yq=47D;d*PsbK<-;P`6ZDsz{(rs95kD1 z6Bg;%vOLT)!u%D&>Z0DM4H;b}4c~w_=_367)Bq_XI%E38`)};5(1(r5`-#@~yL+OF z^Zf?lD!TkngV%dZR*gFBnXPsziq;HnuD_zL)umNuhYpZ0P^{Br={q}={L-NDN489! ze4Kdi&eEI`qj#h9n87m#3hx;$d19LZ3JoHFv$GXadYfvDmFDeki!}Q#>G#sdf4+Rw z z^j(oJRG1qH#L*AZ+;6@?hfaTk4xT;neOY0!A7frpwi5`vS zOY0;dQ0KkFs#Z;%*t?WB{2|EDhxQ0>IH1}@uZ>u{`I&g@K1*YF6dy8uZ2}L_yYnRE`TPV z#mK|*;uMYobW&=f3?IQ0131UJrrosS#cz#HAc!M=!j^%^W2Z|ivKT|+=`*yccg@@N$B4S|yqFfWM-rJOX$-(~r=K_m>rR4LZ<)NS1Vyl-@q| z3PCNc^7v8B(DWUYzd&VGP0Ean@;FTpd=Ci}dF$I(ZiC}Wr7>qxbWL-?y4L-T{^mnt zp8nEqce*T2&3zl+; z1*IiALCmpQ(<7q_sOR$Xz5cx1KpIKdq#pr8@{gcAwm>dXO0HljAkPb+lQ20l+$n7j z!%oO73ws{0xkp4KaJ_6I2Y<=@FEzFZvqnRzo+cat+8HU80pNrwwu#9_1QZ_1tDu4HX~D-mh*pcS6c>CPPY`0cZb&o5CaXV|H!EgH4Wrfv!?8{F2= zu&t_ayfa^85~(LlHbE0aa~`YE>oTh#n}qjEdu+KTr-ym1s$o}Mlg;$Vl#@4a$lc92;v07{B)g^qNS0+o~mK7g1l6y!DP`9Np2@zW>^# zi|tVSAuGEx-4`<&TrPo3vTF{sw-f)&-qO?U;<|a(a$0AJdN*ZcP0R`}HxOcQLmVvA zCDPxx@0q7TRh$LWGY#Oi$l+u=+{M7IKuX~6)Wni)M|k#OJrVa6{K?!4tBX*JLB0d7 z9=-{G`F*Lf%PBl*T!ckT#1q5ak7rwI0OVU>53i|GV_@dKCz!ChJfO9zFWg3J$XhLw zE9)nW3&X^z$_dquM>eSV$pL!*#IYrt#C_scl6E(SBcaJV2dXd7)U;>vq7y802GeuA zdlz`t=D(7k)!u2eFlqn{*@+^fa-dEu$sf7WV+Y{gAe#P61;Dbpa@<=#zqYWomZl<<6CrYp}WNVNnEwDGvb*Ft!|m zswH#UVSOwZB2t5h`6)}YER?bw_JR%9Uywr>E`=3B%?8R(rdp8mc;z)Q=$#|S;Bi1X zFpDWxwtWPG!@HE9-`F~h8_E$3$`k)C(^=vgX){zAZ(eC&g3~r%fAhiJyNd2RRi6-@ zg5hl#igUMn^b3ylssIwkQl_)oD<)HVWxBt`XtiAd8iU@S}fuIs`r8Z~`nk=qdBVYv)K2xRzF)pLo zOI@`X<iOS;Dx>Z(fKDezx&+@&j z9Sm6u($=S4opv9lv!K1#7#yik;j*+Ha9ig{uJ0bLV|&=7nChS7f#B-w+u6wVU|>YAB`IuzEQ@kF~P%GF~Rrp!Zu%7s4P3%T+YNowy5jx(!0_J z(s%o-D!JTTJwq-znC;7*PAKzQ5&4x~qxBg$G0*1cePOaM_p2S!4{a*DvTI6saLN-i z?A5i`ux>#bs9AOW;&n(Qv5HIum;eB5IY0J$>3OG(YivmV^tF~X>KXJj zdec?owzE-b`&DOFs~zHg;>o^8Tn=S&pY*c9YT}kW<98&~-> zfyQ{5PcRaSOs~<;Gk+j9?3sPlYKr$Kc29++A7C)klqO>4R5j{^)b(ws*{Sy`?B<9- zGDhiBsJ=T!TASSTnnCco$C+G!HHH{OYJ)n*#vR0u(%c|2nx<764ZO~pw>)H#en?l= zhlA-B!RR|KWTG$9FCT9eboTIOhy7a@&8eysAq5W>UcDVKZRteZ!^MZRf&6?nP~@{h zqy$@6&Xq8UP>Y}504npN7gzA_vfZ-!98AlDsD^S#8>@r(nif!`JHRV471E7nLy*-1 zNF=)qz>iWeuG?X8v@h*iNHM+TdwmWICq1c&JV$KS8d{d)UUVZV`maE z34JIVj6Nh>XWwLI`7Dkbt{Hv)31YJJ&e*cd($Y-g-f_L*@Xk!s8601F@kyN0JjxPW+@>iI{@I({xr~F zg7VGye$aW~emBnGg+&9Dpo4eAZznS_cnr3m8Nu0wIb#716%1l?7&H^avJ>90Ks1Eq z2geKWDe!Qiv``rA%Ot2sn0^iL!iWS58t_xhltHBeo0t!}K~A3{4A^r3rz+_%^ap`w z#3fsch;60}e+r5-kQ~j35bTuc?!RYj$SO*^hjwogApDXry?9VKvu%ro=1mTR3Ti@R z*K9^R$LOYwZk{H96+1FHans>~p{~L+i<=)vzA&?KMK)7n-1}uqCi)vJEpv0D$$u-+ zc;;_XeenU3q$o~6RNtgE>!q)kq9{Y7HbOy|uZbK#DBW^&W08e3Dsl~|AT#S1z`}af z)Mkt-NkTC(Yf!8zQ?qxr-Q5I{yaZ8(*iFBG5-q4xfCEW$;dPopjU*@ig<*&>G+pvh zy$ZF!F(_d`NG~xeLpnz~_@L73Ak=QVNqW#C8r4uRPNx9XO913khV^Auiu5nTw;o+%hVEnnCyNC>;4mhL= zmbyH-Q!0|?Ez)SjS-}kc-%t_hBs2S=wz@`=rT1&DSk~2X>b9H^Pm;vlEPW%#Xh87= zQ(vrItrBe(f;6~nxlI?I`tuyxC%v;<`fG7MQqm@6l~t?tXgGySWzgHJ`ZvzoyG21# zH*rD0dmwItn7a2#6^ZWTU};GZM6#PQ=#{GHM5F*0^gtDobkmZ4)V78~70(1N`NoumdU5QJTQDG+r>hExV4Sp<`3`EQr@B$LgJ($0^vJw22;S4s+ zL!jIy9FTP&Gq4;vd=7pvW($R3nIQla-=$rifVx{cZzaRkWbD0u{PnF$<;{1#A*mj% z&aQm^OO%239S=5zavkDigTtL;Mu8#nob>)HsOP2Dn=0}j54T+3b@}}>zF*Q_{hXRW z8%{;-)*!30JV8+Bm2Fl>v`btijJG&5rE1fX45o&L zRGPdTYrvTfgV#o6W$NvrsxN#ZU4&w9R0$MFfqs<8#JqbU1nGHK}LB@B2T!w4rs*MvVS zq|dGvEXMO^(0-jEUCNb?8@-MU9h%N-&`fEj&X7qrtkoD#|MK;vH*bSVl)v%jx&o{- zVhIoE{{Q#Zf4}ZIulR=OK^6GRi^@F~`1-F-J^Od|M$zLDf0;!i(d!k@Uu0%lyk5(> zBVgvgPr904&CZ84*itaRK;i&^`eQ`~0`yZ~n5_+G2|#~>oR|Al{*d0DOT`%5FYgYnU-ap=@) zn-H+hPNtJ+Pd_i6%eR^tCC7V;oQ2Eom@&qtWk!oCs8HzO-|p#*Ke05u>Kw{>)~8h| zThY&jSJz6DeR?P~Zm{SBN@Zium~Owys-}ZA=Bt;7+ zWP}V_s2IdMEcJ7GOADd$H&tS@Sv>q#vnC}@>46Ij>w*u(8+_JgZF15akzYY_?qWq>9vi`6tNIiaX6t{w_ST4Ztn z(n+E3VCStY>af#}+x@Wg^r^W|jIFBPApNx1qlK7_J15n(0ko;(6dLvLh3hXHH(}}E z_;Ht!-`hQnuIr>EJ1N|wBT$w{+$q#k)Q0o2g zX&__})g#{I)Jgc20KoEST{T5!_Lpw?Fe5T_)LQOo-6mRyXQn4BxI!n?IwGe;ceahn zy&&b}J-e-Xc@DX1>d^M;V^g1yzC*K*RZ~P}_MfjT8Nl~iwv6{yh{SW#lT`|}3xI0K zsoA3@?85i18nyMgJh*o{+*^eWuH@7Wk!3uin7i zpi2i_Jl#%*pVz#m^&53z{Sw}w(y)W&{S*zCbMq=*p}-fdQoI^lNP)*n%`X`>PoWo7 zih)tD;utW+Yo&`6AldmP_5HA~R1MU>!RZ7o^H%v{g+Z-SENPw(#o8b%wed}w)IoKM zw?-|211iPRwgI?-QBw=(_Dq!J(>oeTMxl?0rVQP=LW3cMUJAtQ1v;is1HccG!4LtCHuCW^;(leP8NRSZ6F7klX{XW=5zDc`?OxQ$^NkgWM6mh2ro(1((Ue-aj>i>J3 z{+k5;mkVJ)|F?F2^Ym{X{NLO8ud}ik!fR&CU=kp#841+YC(^?L%FqADwRs8dG>qWiP90&PXl)cGgf`h8_6e78CQ|zDAdt+C{*?N8Mu0+ zf~R}>IG{Flzc;`Dm8pZ?00&g34tnPCa9o(@3Gg(fHVQ#+kOOKHf}Z&tBuz5hfH%x> za3bUlaX`oFbngNv*M?7cLQuEPB!t}Qa01E+rF$0ea7LI9cPQXQx+eoSp@QRblMB7+ zj2ez-cvHtSVoC#)p#((Ss=LHlCgN3iQ)yF1K0?)8Mbw$EL#5{0^_)YgHd3FnTEUt_ zE0i7qLQ5`6xA5t#03i^|0UZFZ6Gax=mlL8HFQs4;royoH0z`lbAz=pGZeUVy75d=T>jKWpH+2K}mak^L0%PSDqN$ zfxWb6w>=PPata@z^HrjiQt|oP=M}0N$^U}JKFT*ZX343#a;IrC>MwY+rJBz$Wob=* zm*3<-iRA0HVjJX>6X8{r_s4E*++Ed|p}}xv&Uk+R!b8ZlnItVi%ZXsF({%?V?HFR7 z%vqy{Hrq?}=E^{PMVt=q+R}J?*F7pxdPchPb%WMq5Tj->QXG12Pe}<<6WfTi*A+C8 zek8|}Ui#h*b2GC_2nfUg=6J1-r1|9^O1-yTRa2l{udTCb38-Z1ZRjYdCeECX!@Mb3 z5qB0qf?KgR$Cht)FbbE>UN*Vb?f2((LiP(XOY3+&8wL2Nb6_h9SP9{AmE(ZG$}ek4Fr?Vh6(Y~U@dX=@-H^IcHtvo@5scgm zv?Sk#2?scfD1X4vQ~v?jgW!0qPs=_9Jl1B+nXsiGr4D1_38yecF?HZ8Q@j^;Kwtj# zeqk7_3bI}TSF*pl5OkK*^TERbv2dWbQ$S)dT#yei{bkKcE;F1+!~FnEkNX}_B)oy- zs|9VbYox-H`^V?x1#@SVdz|JRvl^<$IYKOfIiZr7RT&A-Qo93yQ49($mDN)j^iI-P zAUXno_)HwsKUMV>r3VAB@`uumP8F~Av_;&Wv8*+d1n57NNK)#KJV(N!;3K2m)KBlB zO2@hJg2SyB>=elgHrD3IwE?Bwa%@V9Ooz2Y0Y<9K5trJ`%A&8>rEpcX?LdwyZoqmVCa`aR8 zh=3HZnVj`wQ4Xc69qmbW>7nTE`Eri)=)GR*57CA3QT?TrL&~YsksSi?qsyjxr{vf4 zs+^@R1A#oXU^q0Z8fTfddqn}57tLGhW@=U;(dI-fM9tPtG2~k;v?Tx`=(>iOsyE;u zV0F(hiozO`j&N$x;N_=gNh(9JT?bLCG^>OnoV3QK)`v{Njh~ijtyHy3W8++nQBU!q zu@E@GsyH3g4A$Bbc4MhUr`JJmv$V6srOOv}W`mI-%PS0Vv&KZ2T#Z2xRYqu7hfZX> zT#@LMeZ@9?#dfnHXHt)u)$>=9w6XAtbPk}P{Njpqrz4rP_)2VKx;u>lG`70{jcp?= zE=B~7NO__` zEE*dOKU>%NMN`=m#Qa}Yo z7iFkznVRJ-uI84-lLo1pmT^m3(Sk+eJWQe66@4X8l;evS9ky4Wl6nqECt9U{d1GjB z$DAt9WTFf0Q%ld^`pJd4c@um4Gb__g!OMtsKW3UCk3ucIAbm1R*OytjY~mNO5)tY@ ze1%pi6^aeg0ja&l(^eOX)YsjRmw#xM-$Pc zs_3k~^mMh&0WhRdX^=b57DK$*JP7ptj}r_>%K(pI{V)K)QGgqPtOUdWnV|yG4d!=1 z0ckSJkv+EtdC*c^4fqqCz%fy5q5y7zfo1@V543wsH;EuiiwV{*%Dj*S{)l6?hAjoi zTu_j55koF9a6fnlF!g{(0z#_H2<|it{{`Uz-1$VLHk<{%>sKz1xPx=DGjof0!>4j`r#5y`<;JO1yGLb9jso=OylI&q1Kc66 zS4GD}B(&WsYR&0@k*>;-W*&baSGg5wYmENTM3PR1$lEiaVpjG|#M41EE9=vNg-y#i z17nyMnUtM-!nl+onV~NUGCO(p1iFwb5%;4Xb0j6vFX;==-^mA8jN9Be?ye2evHOB~ zUZ<-$>!Cjfoqgr@E_4mrf%aPg%ugZx{AGd7WZhk-(+u5Gjpk08^3dKBYToQu7l%7S zEvi-(RH>%h3?x<9oDBUE)T$L~jYDWvE@0Zb9voFBeP?xMqaTPxT7$m8Hdo5E)}YIu zduiKOu3#|qe9O^}PU+de%Y?$o@){@kjnJ+_0{+lI%de{1)PTiy?BY>wJu?+tsGU%W za6O3DR4N3(j2U6nhd*H{7Y38H{Gay7J~D{=glKRaph3aJ7DWQn4M4;9vcAaF1}1m%U~7<&|!<3{0XOb?=w(BUXLur+b(kk148{saWJt4xZPVw+JL z4a6_6Z-s~pw79>H_hl_d^Zt6)!r#b zyB7=*27#ER(rE~@I{6TU$=`z%5H-vSh8z?^5K=?dqV+I#Bu|YWvgd{J7SVov*hgyQKG=W^ZN)T$C?fJkQUhe@bhHgx9Mef$s%y zaeN0M(pJEs0OYQqtR|Rn+B%+*@~ndS0owuSS$rr40gS2Mgm1`oz-~e8X9Tg2!yh2Q zRTTAlzehv-GM{u=0xKFl1!UksRa3OBkAHZ996W=L2U*I$i;YTBT_}VbO zfW?;%3zO8va#J!GAU+Lrcvy4SY7y#u2-l0MMw2|Ken=bW`Gt${7$65-1^o$T^9Ou*qOB(KrA$ z$zTJval&AnZ~|k_`OfK_&gXbI;Bd|X&^E7ncER#@-+lkQ_pnzxJE3Q$ySl2n!mrdy z>G7chwpKzYhsNx@bLOPDS$8$EH3gRajz)$fR z!>$}igJ33Sm|S{=PVG>GVs2xLe!K})R(tu5G39ewJDNw(b1S<#cyIMK{1yIp6|2aT zK_OzhDyhPl;fK=92EfVJD8cy41u~2ZsVZcM8&JJn6D-U#s8yC+uf=Y+cyldk zl`*%_qWB3xv2}f2+#xrjjF26aG|C6KDLVWCkCf>9(=Nix{eAQ*>2fZSYIKQ4nwc>t^@DHKW|!B8L_3_xbW<@6W7FyTPa zhov19=1uAOAPGaM{{<-sGYI~NX_+9=!UFCsKve*xLnausCBzDlq99Ob3*=wA+E#)o zjHPUjsmnlY;lm$}^Z>IKR1f#M*&U{P|rPS$sg!J?cpr=oUk zP4T4m0bW`UW$e|`*o1ZX(5oIO0^epqtEOgEM@uDH$LGKLK(HZ0Z?@`eMd5Zi2iYlR z*{G34#Z}#lUp`e|JykEkubT3$!2y0|Ui{v^YLi zZ({VGN^hhsKxvgujm}?H(CDW##hfBpV57d1vf^a1mHG&>XOnf-O$>EL0@bXItt0Xa zrWRy2R@rue16Qh%h6hhV%u)_K*_23fg#_{dae;&kEd~wgTBtxm(Bg>U>kmResREF~ z*riDUFtt;Zz(8zB5Cq)+P61FeR3MT6LIw#s#6n>K2e|ue*pQ1_~|{10;^H==;Hh7Es&q{ZaYF z-~S68K)THgQ;<-?;W5+Hf1-rx*PHp(bNaT-p`J+C9ep>>rXF3`w|5pbA7Hoo9x@iV z=rJ1QyS9W&Ho5SrSr2)X89IYjVv_Z>WzLU}_~ry+=qzi5@@49{(bJ;USwIoF707v} z**7fm2TjPbWztb2ih(44+rFhOfZzSEv>+ern53@(DKOfYriYeLnjRoZl0gr3+~0;0 zVT0k`4wPuWrvr_KoX|s>Ec0D5S#ky}pA^~V5JxeH;~g?;HW=lC#a^DJ_AA6950HaW z;0U=8Pc{v*5$8f-+N*#otODem4Pb}30gBY_gi39mEDW)mSc1jYsv><@;BxtKITE=X;% z@D!Yh1_k5FL10_3O`Z-eFrmWyM2a@8SqmX7oNkW6|`i z>Alm7;InMe^j_2n=U@XI!7FD@nU24nHf{PZc3XVXbYz-7-d$*-`O*1y_sEOuJf+r# zUZ%FNxMh0ZE=PsCzL$E|Wlz*iU#wHviYj{XJHQhwd#R_ZoTm6<{Fp=MQEIvTnbanw z%OU~Lu`&2Qg6r9P@vy-n=EqZa*b1FeovgA{Z<1JrNs-sqyWS$r%BoGx2}ty{{fSR@ zmBa>(y!2aDVE|{@>%GBxKQ*#vUf!INCKq3Ko!n6O?z6dpa;F)sEf{5n8R4wk@05`CUu9HQZt8_^QL&cCgLdffaD{S97~%kfqJs(r#>D z*kSCzZy5|>W7~)A22-ZykPc8sbm(4ia+O-}dy%lF4eewlT(RXPqam#7zySFvR_0l# zpCH*J-;x{+$!b|vN?}kKB{Ejpp6sJQ)&W?gel0KOvMp4n%%~L0B*x@cu$JUgKg(Dn z!~iYEQzS>B96+6D5|oD73pEj+1FuFB*)Gqiw22H%`$RyKm8IYyg}Ftny+kceMeq>S zSlHo*El?yyMwm+d=NVzwK(nCs{vmGBAHP5V7(uNDsSgPbutFt*nh0-_#Y$E${e*hI z37Qh|l@vLP>;@kMk^2y62l=m%8AMnB%%3aLLU@E!)}kQp!Qd>sNy?KBcOeQpBDFtE z5QIpcH1$yAI(YvaT5>l_&APtXW%&q;+P9XK%;~%uuO9n%VU8)(V)dA4JJadVMzFJSYts)!QA*zj}qrf>ckMvdpR9iLM?RD#~xcTPmii#)2lOq`pq%H{<;s zFiBaBT21EY8Z@;g2Wg87^6oo?@8&otAl-u&qvDIJ)l##4iDrQ)gN_kV!(vD-` zIDQj<2QYBs!E0X(s;!9eKn-lDR|-?&N}!rRd0@1LDFhQmtJOe}yDRziWce;>2jFlY zz6sYQcg?=pq`XJ33QySgUI)JJPj zH|}{4eTW|Y3%VY4BFZsr|CoUX7Z(m0RNFmpVM$IT9~SU>{BJZfr=~87bi)DAPyM=)tnQO=2NZFpo~3>EPuhDJ$Q0Y zJg|89Fr5h?2eQ2*j^zjSQi(3NPM-W;{MPc^!7^EXZ(;e@NR1-TaS9fVr|>Wq0^H~Q za{SnJrG<;~(c!#R)w%l~d=jyvxZi%FhGlClEEgT+Um_M;O3e$!tQ8D@AJVLjsXjY0 ztcI!{+PKEAGMne-T1_#^Q3N0b1gI*tbfQyZ zXM`hU;ban&U5L97*deqa644|oLrD9EpQ+~%jF8Y!iaN=+z*dQw34Wwtd;l8^%7Vuc z=*dD@hmW{K;9-dMD%}i_%#}vS77RN;JcGOlL7I80>=8M$-)+h1gfcFpNstx{k{eyl zK=!#6-%hN3*4YxW838Sg*2=UwBah8RD88bq_4aXV@O$`Za?&M{B3d)Qvt+_-WAoWz z*Y-j>P=?K9iDWBPcB}O5f8lktwS$9xF#FT=fZF`Y7RcU|T2UalJYXNj@#;Y)8{_}bv9#Wy4Qq@6lg`={(uFOCGK0ya zRxn=~Wq}f<)>|QFKncnH$AY(qmt=4TnMWlv@DkI)nmHquKDihTsGD}6>ip}IO7==1 z)<=OnXaqHIqQq-ZN+2aTDZsZitLXK4Oz(6O}B4uzdgJ4Ri)ESMv9=yOb7xeOpUbLBxfeVeK{tWXvEgy6xO0RgB55D!E=P5gCa zN(sRUAiqT-A>V-fhP(oUWFB6h{2@7gS9aITg`1!-0Q$&icZWx{Z%6yl0P}{4H>yyV zC8AeeSXsavPfkewgtt2Cd!O_H0Hz#uszcX5XG9g~(9YuG*0{}tKJruO#Ld0s01R+R zB!d;r2f#*M@GAkurG|>e0$9KjSmxdYFvRzW9)WnGR{=@vOVKZ2`&1$;#FG^OC}0yB z0jLG@&C@=HI*eT z*cbu4kL*d=fVE`O31&$+C%uy-D3D#39q;MGf+aA;(E;!8GJi)$LOF_BgTmieaxrv3+nzyfj}Iq^?-{BN)1 z@7Jum-}av_X8yPLfIIYWRH*2de!esk)qg&0|C3V5bg8E&*>soh@u!>pm;HjTD18aP zsA2l_hVy0hGiKC_Z?4L@usEltCWqaaQ+46v+T5JFx}4lv=IOc|vXxWUcWkuBJ$z6> zRq@ZQUiY}_qDTw=ddLK`!9Y=z(}A7>&wx?y7(LYPb|vEw#h@Y|W}ix_QYhdL&W7Lc zPb0sFY$!BJMc*bZm|OufT*2(q%2WynH&b954yqLzdGh(N$&?Xx$oL>FQj=p!l|0$1 zc4SyhkXlD?P}@TQjplduH7no(xSjl;se=&{b&~9Y+bCXC+k;kf$nX4|w3Y>cpP`Er zgiXjg4pPGvDz%(^lE>h-D!4|${Ny9;W|=N7^#Sa4D9c32p)OAdu9XDvLl7AXfRS=) zB55QOvZwF_cs7+BUZ>4V&JSBrLT?UPO`!}c1#mv8e{cFoIpv52&l7H^lRT%EfzwE* zQ1*QQ7pvuJIr>&5Ck;X42jL>MTqDCl7ilPBWj~nSaB({sE@oE4bo^0HRdseUnO$9# zgP$EhMhDOZgi)%?8BtA6o{Cdu1C^4@G-c=6ogAir;^jxeDt4 z{0zarG0p|v`T3$-AUW|RNL_QFdei}k8FSE5v>u#?`_aAVQSjO;X`-WvA$o=LflE$M zV!$T{+DV$J3Ld(c2~xs{UT*+3h$0a%PD-ma1R>T*&~eBFCOR;*CFI@{=PPkL37nRg zHGx5cMg>2im3$NUi~y(!I5q)hO)+j?fLu>OD=DW^>S#*cgOzI9ok&zM!a>M>(5A=} zQE84sbSv_Kd!^O3%la32Lg9_lhf^mBe^bm>cm?w2!UmMh{8N+U*@?0SAk*m+Ql|m; zg-`==nm>^rYJT`IWJIUhS zku*TgxJMnCoZIGiW*C{owO8-HAI)kx*laZhXlnf;q|I6|t6=J|ylD$dSJbRNzhcOi zAtBb4r;?Zf3lRW}2bSH3>JLvoHTlSm1sBfS{DX!?W7C_OP*%2+j*s-I#j$buIM_a_ zXlk|Xo090k=>>(&gKa$6TDqwS1v!zPP*X70SsQfcsEeMy+3G;3A#b?rJwEg%Fslb!&oMVNSv6fod{_ge*k>NA6U+ zb(yRDYLiU|oFxxncuZ=#b6{*jwpLSP&NjJf-C$2LW*Y3nLQ1hjj8;Z`0LyO<$mPp-^UK*njTwM#PsD$%;gLaC&M3y&lO7h9a4vVALJS%#36N z8n;ZZc1gfKK_UM&35Xw>Ry39x{BkI4b~#EMI}rUvCHQ!G3Svh!(((A`bAzF56H^Rj z0UoKDh&HZng*3$M@g>2cHD0gNRsyf5cdEi>J#W+SOr&ZM{bp-ko(ft=l*;KSw%>8_ zkU_};ilt`#ZR_yNaB}IK%F2fNz%XxYdMsz@u+W=Ud%`}soC2P0!+R* z%BEnh5xxK}VJKz`SEV&OU@%%pc!k&iNYjf0_uv42J~ffaf(@c|Q|@UorY$=9()5$~(`$6H*QSnNkAlxWibibD=H(Ly)lZl> zNNJO2tzCQ7%+bkzulZrj%1!v>&@*@2$K2Sa4O1^p!DsO|AJ1Yr-dLeohq93K1AkTv zO@EhT%e)i6FcDxxB?j=tSahi4u36Ip#f3OoQ@fr~dBcV>vAnF%I``kr?m^VKm_L}G zAN0qlhYV8gSNt2TpFYj2}QVCh=lVHaY3|H!X)E#h-5VbFtiJNQ3it$nni{LxTO~A z?m_J>7>iW+ArTy8xJ_iWuM?L1(1tK>up3~KOvHy(D5SPP(Go@&YcK;aMjhOi&=%xW z!kMC!fJ&Q9g|Aq?RwLQC`rL+z59Q&D4PRaVz9Qj$*jwu;C<3LBq8#6qSD8Jv)v;LN z_ItP-}h2TKCP%aPScL2C@`tvu_&{oM(I%GWet+5v+Ox`gHd6m zKOa+t5qE!!XKA>* z5Pdrou96I>U0GYPq^8kH@~Bl-Iui0p5Lk+&K39p9a2F))tK5X*`;Vafs~_AQ&R9`D zcxf~+VH9HLTdD)@5>R*moN^Y5BKEK5Q70#O#nYNOS4{X4AFY5@La9DZ8z0(S;5 z)+3@CQ5%H$kkT{6yA45qWSCQeIR^7g3m+I&NZf%r>}EJ4R9S@7abZMY)DnUR6Q(JY z*@i52VU?89I|N!FqyPgbG(nasWU~5Fc6nl|BfC_TPq?A5w=hl?d}*U2ADOZGFL+Gy zX_e2kpkd9ChA|FHL*uWLUGYcPe28wkv|#K={0lDG*Mb&2`^eYlzh+`rja1}XC`ZI0 zmi~bMA(=XyyU*j)&E~_2c+LY=ZWlIad?qW??J{%+sjG_`nB*g@1Oj8ct7S}{9+@(r za%Uesu$}URM{BehYH*S(KDch`)eEVCgAXihUKXhPol3hxE|x4nZ=5eG{PKs|YvPR7?1PHX2EB#aVU$S|4ljH4ys$=% z;uZtCFMn3Kg7>&0HYf`_C6rZW`27dx>`GqEH|ESJf`IrYh{&R$zIlM_fuXuev=U${ zNSYr6S(0wcp#-D?=wnc%z#bQa9wi+J^9?jDBhjLu_fyOgjA`IIjuWqgV9pd~sGumN z+JbQR>{Kom3AiI|C;(rKFzo_}egg0djR&0Bz>xtEmrP(Gg#s+aF(JYS&I4<(-;4*$ z)TuMfwENHiyfE46^vKZ4o6x;oL5_h?SwEU~ZPg5yiY<+d(n*SKoK$Zy;IB4sLG#Z1 zEm=#m8ok~>AgDG;Bc8zp`h5HR-7SrQ?Au)~&{b(OYJj>cogFdSqNM;T@i_h-KY_oa z=W>;A=lsCtT1=4V3(5TIAuDbuH$*xrIwDhDPG1As9>~E(&`P55Oev%e%Z%KM$p@0}P~pC>7|zVv0rQOIKV&4F+QV6? zS@l}g02$=W(U(M$fcRZ7x$G4k6h;)hNMu0ipdZG8pxD6wV3kIKrV1lO28rnEss4sJ zk{TJ3&OpYQOfn&OM1W!qB~lWa3^Y_*AwwAQk+Du8XNF)Lvw0Xq11ThAAZQc+G1iiR z27f#2aGjr4XB^J_3dg_5tMHYvtiq18k8ePJT|f&7jDK?(t(>C#+U*sF$ndIGv%b}6 zj}Je1bN+MDdXth-2aPdvriGnoh>rWqglbe&Qk-82;|w^tU`Y(r2i%t8A;tQ9Td~u= z*rUwgxp|pz?>D&R009Y4AcRt zg$gI%0W_oq$xD)lx#1!+AQrlzLPrJw&sBm=4e}C*8H|I$6>_eKEle=>nIsI@rU4Tb zp->!3cf?I31DCK!AiQ`;-fROMbvO&}SFU&cAyR_2R^y$unabupHVDH$f4zT5LS} zSMsTgpHgdphBu08`F-rQU_u-OLPQksXo$Z*pz#=lI2A?&vEWcfi1470z~hCtA}=ZE z5Cx!37z_dfB8mWO+X-@k=aCBQb`#p&=arGZCfcqIPyz1Q(~Nb|R@os}I~ zvnsZqdV0tHht+nETed`Et?t{OdjJ&KL?1_6`fvq0-G}6Uw3fHT@tk6)IfcNs z5R?R3?|#T)vgXeC>r~jMR!c4ANUku>MN>U$c^=+89QKk5vYRix%J1bDz#Gewn=bRw}=1bj{lA_z>qKqy3m0QLpUiu@qBJm4%+6TJk)nji2@HK}4QpdONV zRp?BhCeT6!j2uE&z(flJqzzJ=0(hu!0>REnG0-?kF(8*g^Qr5}7TlC@RKdjqG7k=< z$yRun5MibQH52qOn5yYWSbv>E=d^DAwNkwbWj#~TSj;e;>>g>#+3 z%`@iV(NSL@l(~z~mXz}&a|;3acz77{ z1zl2k173zJ?~zLKg1b@Y?veF&i=C6m60hQ)x3yQCP1Kp(C#F@qDbvhB_4|*l%bfFI z@6%((PYfRRYa~^rt|}+h9B-IAMz56}jr##9sG^`akXa_t;QJ!Lm9z4O6;u}b3Y{7! zuTlgYj+|m^A{&j%(wK|$vplVJFy*57c}P^w{eXb&2`qr0BgXDWF_OAg`rINLH~;LdN?R?a?l6;RCt|J}jMRP)jo zy(>>o3IsDHtT{JqcNE!j)S-aRV#xw!alOT?wBWn4v<2bj!tx?-!l8BYN?FM1%r2^_ z0P-m-^a|9&i+;z8BsttVK>pe(Is)FTUy%%DBFOcJAOV=r2J#1>Env8VLg^o2l0#%K zNqdR|-j|SwN1VY#!-g393K}e+46IGb#h_fle&9{;6-?tC3Up&K0P_$I0c#-GghI0+ z5Q4yFgnNa8A&Arj7hgJZ2Uw97imhf-X%xvLY@^f z(1|yYc?6TJ1C@CK{(fuypoYOSvI@Yvs?i1MK<-c&2Is&;0)Fw1oRHyQdHG9jfP^N? zc5U!0#HpH__J~9JoQyk=PVvTb_>tx2+ZxOB%Bs-5LNsvch0p!AxD`p5vAoc?!*lqHo3wXkR{l*1~rd zZ+d1{pDAW>n;-=b>Po_Li;=1G7QTOsI=3MLIAF?%LYk|x@LFbeG_z~qrh!?}1=I2k zIaLGTZQavq*0)ZIcXuq*)pV%8goy+mu<qZ24g}TCR&@Q z8vqtJiVi^#>?@*=MZbe)2pNlj1_FyqA*Grk(+c8h{G`GI@kM}~6~2vHROk;N22wt? zT9Kd*lWPd!19|#ma1<6Z5C$z~gb;Z;ND%`=1)nk{z><_`Qi42ZV0oGn<0P>#ma;Cu z91&)TaC$m|8^WE0^(#!1lpYoY+at6|gHZ}*xsvgbg&Aa+kVg<*#+!vz8Ic$1(z{BQ z|0iH!=HajVKJ3dF;|xel&gF|Vtos~zYJNcpP`UL^Hf!t}p4WR>g}LL7yYTP5h3k0cX|pGJ*CY6Mz+Tl$-as+*&|JyI-z-+M#Zr?uc1eDM zH(Kit)_V%=MUI6^S>47LN;G=SKW5iO)ka6eu(`B)UmmBmyN}MyoS{XM*;T?w&aWj04_@9hHrP>hYH*56x$HGYpVY0{ueY|683Qe8Z*< z8I(=`97KWW>y~55Me)+pIpPP3zt}hrpX}>l6kjLJ(Rk~Cw7&E*xMQLON?p zJTtPI%aTj=7d}Vde~FGHk3r&MW!%yr&6hY5HnqP>Q8Ko`lArvyORMv)sv1R`m15JR z?r}b-73Br&>YRKx954heRSKv^tiO1R#TgxEQ*(iqF!&O+=sue&5)Ml0c!x%-t#@1f zRCApxK+y^q8l)V}OAL~=eLXrEM64S9_N;)ztOWaz!>bF4q}1E^YOa;L8Wub@2%4*a z^<6zAYz+hTbtFg>sAo;2I;`*n838NbE0GL=R2)Nk2TJYt#W_ zMFT$A0ASAqd>djq(6YH!yzTYZ@wTaG=!Okw=+v)`YPFGka2?=qU^~p>f^a>eA9kd!2esUk81!c-!x<>}hP=!xc2n&CHyOpll;)$_Itj zWipecJ4+p1zn{|LdfEI63f_DU*J!zgI1js{y@Bjh_

W+hd9;n`TbyJ!wu4qxteR-=(YF>#qi!YVnAm0XxBg=h(gJV-?KygW>QW)%U#Js@WCG6w={x0lm+`iE5g4@Lwn zdD2CYp-qiY1(`%^d4P!6|8{`fAD#Q`vm-}7JNv-ViA_xtCpHb4#4g7-z4jWuDITNm zlO}nA89AM5fF(#XBSeiHsVXZAx7Dfekzf-Z7K@KV4coV&A!Eld;=Xn|Y=i_pqYAiM zEn#SHl0WofsTjZi9WRma-)SD#NO<(USlZY9JtviN->aWcOE@HKqdDI^^2oQ}K62z| z)HZF}&|%Z2wBih0vAeNxH#)`{@eAM#!GkphNb`Y$nNqo2sxCJg6UD_UYlg~e2e!#( z>LuuC{hXZax%Ie`LWy^QEt2Q)61GU#W8a&+WT%wd%0ZSk6r%f)Jj4F~xnnCY0m}xu zbh{)Z)`5d18^lC2Kv1`XfPhXl2!-3p{3J?xT$A>C5HcWreq}%WCa3?OzvDiS&%oYm z4I-%)$8%gO+}Sv;l~Fz`x7(GS?ONf<$?@QKTsb+emG10pH-3-&UE#{jaVP(AirRHj ztP5DD0?Ky8kbEg=_C&MY)R$BtctMm3X0$bs(X?;A0sGKfml#Q#_*tQH%|vi136L!C0s;&Zn70H^5n11exI$oRU>Qs+ zT;Sy;U6G0e0ZovsW_Lb&mpz6?^3QF&;nihR>)Ri{X2$Wqq3`BhVE@5wo7J~L?^WUpS`*3| z{+AgKqS*cT?E8lhkMH_!(~r%aiHdU%O4iJxmhantN25_?qW~jVZ0=>UCAXd5i;PE3 zAfTKRkKtd?!yC{1b?fXSk?iY-U3+3H(^It33c|Dx7OR=uK!Bp_7Lv@Ciq>E0MV}8) zpVM|^!5QloOUP3C=(h2P&D*CKl@Ag#tqGGw}-7BZ4nt!8 z-*NS zOL_eF+$qR8S_#qnU_$(A=OcYCPQT50bim1`-A`|(*0oC1_3U>mj1!?hkQn#QU$-4)-$t*&}W-^%v`VpR1}&i&y1E=W@^0( z{3{1pNl0P~W>r*0GOA!2e`_jn(|SDSEsYplXOW5R_Ydz}@nM%np=~~i=pu64y1Hz% z6#kHB0RHpZrGb(zt^i_Go`S^7w}^fvCWM?C7$ASxNR8AVyI^T4ROBRd?}5B67*rqh ze9EB-ljE|QBv^*i^9&*u$Z|C`Rmh-2s~lt#5Gypu@|ej85e=!MFg(JPgTYUkkPsMC zgsDm9s+%!Fgg;C;KyUyH5x`-jT7Zx`A^zpc(Dom4Nmt3}bpX2_FU>DSU>N>n-uVUR zW`Jw8BtKpPwRopbb8bNDjo0npSc=zg#+O#;Syl!!MdrSv3;b7;s+X2m78GW>Di@ay zy1wy;UF!EfKi#ltV6MgH3mU`P(5A}zU}W1hcRT6I2XCA3%mk^o#_JDUzb1cNenp+O z=XAYc#r-rC`x6te0HaEF;mMI7T?6*wTi%%RY22SY9Vmze(Q;O7Pynp$s6c_Ty|}o& zwY0Pq7hT>$>+yw~LiR9a32$dR>+&U19rM6I zs2Hw+|MCW$|0^$+25KBzrbr-Vei>Q|$FDirq&DYsC2^lwZQOkgySk{&-^~xm%dexD+Xhn1rxZ0X7uh9ZW#_8szsLZ7PvY}bi$u^{A&w3JKO;Vx zf)HckLQpJR3ox)oQ$oj*l678&GVq^L0NYcAFDyCN^Ii#c_~-gv&8P@Hj4lMwrxZ$} zXOVKr*FEnQCr6M&r|{ix(O!)Q($0$TrLy8Ls5M4>(d9-@qH?pmT@H&5O&>aglx3w~ zBv(S!4ww6BeE1V66F=!viR6rUCj`}!@_KDh4+L0*4{99=1K`s_bzaDC6Sz1EaS)XP z>_FhnWY9n#0Z;Lr;9U3;>+c=46Fr?wqNjJ>z5?%m^G&>O zd3O(rKYTwbn|(|3=UcXX-aNAG@wsyzFBh@EhHv9^+)nWSxxiO96tojTw@b-0D5OTD z8}yL?>6Fq(z|TQw8gvWNg(=f>3_`+$;6JdX5xO_QtP*AiTu)t@IFPrMn9@63{OkSq z|0?dBH`{jGlqvghVs3lug5IGm3((Z>qJIj!1)dT*Hi6 z!@3;x86&R25A&raYROe%Iy2}AGE#Y>V4A^@)kj$Y_{!}Mfh$yor%4>#W-xB9g{3^8 zUy*SJ`hmDdZHnJZDbQ32$)wml@i>BUovhu#G*A8~-9$7q!VH9c{dN!%0-K~D@6$rv z3VC+I6%+;D1~>u}Cq=AY4vhb#*0<1Xs?)jcV`r5I>(6$7T|0W%s#lGx@f+{H`g^@N z)6IxGCKMsCvr$q`4z(v0*^aU@hE~OUDj5^Rl}6n)g7xMkxxN*aBe< zSH0XlbsqjIxemMWKR<&Iww= z4R~T?*lDV&$k;w~xUqQj34AJtx85d+HivWFQ}QOy+z8PS^HA~Gk8U{kSnnkfUh&Tv z;=fydoW`?HM~9f*ddhA3WePnhKfBehx2b7Ja6N+e0Y3V4;N}UECYl;AVB0W6sfdJr z))yER7!oqo{eVLaVAX^-kqQx#aZ35gkp{mKcUm2f_gq->4&L1=0eX2 zrChEIJb-+6gB5-8UHBhI?!oWh_dg$6(CJ~+iTcpL<|VLJcpHMOkR~`0tgL{|3MDPz z`Kft|kkJ&go@i?$kNGZ|_x?Nhy7$&x=y}{>#hvhM-A~Jvyz~M5;l21g-h3DZkKBg> z_o6$)wA4U-wRH>xp=7rX8-6KRF6|g83B%@4_2#-`)6w2jH@M zKzd@_B?)s6g6te9u!vQlE$@abvj4~gi@|!1EdLs6okF*W?a3^{hfA z+qZ0Kq=Z*1nV{!lbkoGio6tY$yXJ3GXabu%ZDlQ#rdVMqayk;0QfG}nt};4u+@2h} zX+@S!sZo~XxA;PpGkw0P)k7w`HJO%+2CF9#3KzO!*+SO)zIAfy)?t#f3vS%bWnle5^tu=yR7c>LC_ zzb4E{H69l*R+ zUss0@L5vOc4lR?6=omAM6;tQ!@*GE~I1zT_$TwdDV)XQ-0?8Edt1!?}Q6a=6w*Fs3 z%!Q!xeIleG(c!5@Kg9(uk7atfDKNYL7!5;aX<4LC7cMW0IFb38lgL85w%s%y{@wf( zay?%_oOuR~eDNhT z^2`fR$9c>>P1NdTOGk`Yx~vONN0rmbDMRiX{TDR+<(Kh2e|a9?`Fv)W+PlqIupl?&-B*}bQ@28GiUOYh8Gr4S7$>3 zo1xnZgAN;iHT4AEfxka>c;pjke>B>i)$_sf7f}46>+!<81x2#)HTX$uwkf|{Wmtq% zV?q%=U=0*zI|le-{>1}JoFPC9iNVOK)|Jkf?D*u_i4QF4Tj*{Jgd*!ILlNo5Dp&CN^A)cB4s& zq>i@C>R}PZF4cP8{0KqR%ERm2Y zUJ#eKkY0U$_7T0k$mVXucjBKu=yVup-Y~~k>B{V#svlxECTSAJ}#I6}( zOE@wXuCr@Y-Qyn_+qkFM;!(McZq3j;o98g>4E!T3L_VvvQC}8O4vB_S7g9Q2q0TCA zD*y}nHvBBVgENaf5L3`0ngB}oO0ry|h)IVdpDUUVQLnXH;msl6o2UtJoCDq1l+v&v zz7Mu80{~sox(Pjz6{rYQ1VM#J^$@php9dG~i^8yH?;Of_v zPkL_RoUQYFvFZD_whANCXdE!hAfyY zv)$L|R7JAw85YOds@3?Z0akCO$riNOwb_&bQc7HT2A9SN(dS%li|K^IC^Lk#8TtUT z8Bi)ef5E8tYxq$syhm?a-!<`zIipU^nuP`-$L~EmzFS(@Hs@%g&l(zEV@^1V`7c}g zd?SL+0kBfGnqwBbwZP&G0C<(AsvTdq8-D~)rB+X!O=}OJsgOEIDWy@Dx`Ab*?1LO3EhQ~%|{|%Oz%u6DW9#9q5Zn$fpsMdwmpcSJFy90 z>W;=GOHjkH-_T7TC+DFS{J>|Rtlo>7f5Cr$eD$KcjxJk%1ig-1AW~p2fU6k+gg}gO zCI0R?H1z6@f}!|}8;|3ck526@9v>P#;{$ZF2n^=vx~5OO#xU0 z$y^Hop9fLuGo(#`;7&DNAyty(%5k8u66TeX%@C#!Jwn=qi{U$EV1fy4Ac0rpo8yEC zV~>WZ7Bf#X>Vf3^_FCpt z36(LvXz795m(3rHDnV#fxJ!pLmbnI-S;`Gn3zzcfR*szwm6_Su@(qCYxm1R3IlqAJ zp%3?UAt-Rn${4P*lVfD)B8MNl<(9$5s$k_vjzO2?BP*SScBjecp82WYV)ZR)na_DW zdl&caE9Mu_ltw$axVXWpLx(t;7oYzbFtOkwc*f`d#vm}vpJlG2*5J9=gJ(dH=f+DF z{65YDF~%0aB5D^b3_^cGmypiYrMuM&iaa>Rz*-7jOnQfOTG~$uX4F*gdGg^b(x-IF zeU=&>lMdQRcO}Qn-@19xKNX61iU$lRZjs6QK9$KnKPKncW1q8P8QxExmcCvlo5PC# z%CmE2vbkdBRXIJhZ+l-SeK#7ueet53WzYwBYI9X(vjks|qh0b2*(6y9!_DQG4uIGZ zUKS&T7!AbFu)G4!X7LlKLQdBwm!q2i_vmJD`HZ-vm%Pda;k8NC+H!Ceb^+_}_X6n- z0Ma4P-#_=b9~u1#ok9x^H$q4Nt}WOb!PK6LQAZl_U*z&RJoTD1&r(cud5&M@c zyY<#(OZIPoic2jmHB~M244X!4vuQOpD=MKR3}1r6PDdC&O84|_OHSz9LC-`HIffFE zs1Bc&kzPo>7hb!2Pw%2VyL-!9TFNVi4z2jrX4B{`7CpL)!Ouup7F9C5GZJw!ccL4U z%aSY5jd%{VhFXi~K(^_)OL6`M&IfCXCWx0HKJw*6V@uC5Shs}|2@q$%wPAjdKu8dL zM0Ah4iclyHqc?W;$zW&{sCKt-RplQmeqOn0#z;ec+ zK8<^>mThpij}CO%Xdfe{2f0iTnk7C88By$bR{V&Vok%t(vdkkqi*MaLd(M_Eb7pV8 zdQfpuaxBEFkrl9wP)64=;jLa7vjANVO$!Ekup-aWm&d-g7>Xltpe z8rD|6Xz$)dbn7bg0_pql=0F)UL*bp=uivvR}O$$#7&(}^%96E zO+o3oNTywSlCfmwr2_`YJ~AA{gDnW)w6Y<1e1kq8XE3pP>Kd5sE;XvIVbNy{hwdWONT?F+aD+1*BOoH{qrMyQUs0@Y z-X8LjdwP*<#^8pT2Vu5RB33+w72_#Dkh{ffH|hSFD1PA zH6bh~E|A(pNOs)C3lm^%AYIfS&W1Sm3G?!Jg`SIPNPHpb!vM^E2`GX%6aN5zv+(y2 zl=OG_sL)$Tjc=GZ_Z;p$djNkjf8%(geM0Aiv*@}nZb6Ek-aw5txlJDN8(XPWvEs+K zYI+@xXLrinfHr(4eA-gD!-dR4hjs%LOCNg z9|f@9MWCB(t+PBA6Pter9mW^ z@YTequR-1r@vA{(KCvpLsMCZId2DghZZ5;tQ#M~pWu)1ou~`>=!YTF`S+Jq{2nv3 z>5oH0D$RFFx=7QPEfs$zmI{BU1Gw~fD^xq;AQmqZY;Thx zm1d=A6Ck-kM50IdFC9OW>JDQ}2c$+iB&D&W*IWKLIIm#!MZ@|RsFp0Nz^)13P=6X` z0HI&FB*k$5ticdc?x(w47S%B6`)vkHKK^812SoYYi$1yG=+PVS*J6o8>?D7F=vodY zsGKDul2?64OLs`A?5Qb(#SuhxqUQvs|SPD?9XwzbC?|rvS!%rA!;=fd=jvuQ-iWs7!)rkk|eun6)>5d ze+)^2s5Pk}&_!Xlf`>0UR9|WpE_p0 zNI^q?Cyn-xVKrPHoio5p_pq#UP-y8bJC}q8Ii(1Y2Y0bz*t2-gp2f`-!mqxo*@;QHR zC3ynkqh6;u8eu;LX{6${3@ceHX4zE~W4!i;sdFAFk>8|H?2QnS^N0n z$?GbH$hc_?Z7YcRym`EQq+D7Q@Z|e+GK!hTYR7IKo;!*<#Bx+UygVh@Dn)v{fMYiR z6!+-k$FK0U)D)@!w!ao8&mqxK(UYPV{unBOu3@$L2Xlh(S7B95@ z5K4_)|JbSisBrz0ThTQ7Ui!Yij@|T2EdPW20sa;$6?bXQ)ZWhB%P*eL+&m$fSU1KIH?aoIxZ9c} z7KK>qw&cxS&)nKxHx+*(OWq`xe<@opI|rQ%o78OCB~d{%8jD4v1=N=Tw>v;SXvgI( z>PLZZ!g?q9_B@B(6Pn-Exe&@?^8D7k5{G@=f!mfZzpZF;IOD5h$G-mhM9m}(GD9x) zXRMeWGIs2cA!B8dUu6vQc=PxP_4GZnbx`Ob`q6Jb(Ze1Msrw4 zwrezJsZ?Vxrqi(~yf96%nA;PJd5)Hr3hxkc_wr|`&QuH_s@4xXaZbrH6fE342?#3g zWTJ&2`U3?%it&_SXF_$sXk{Q#-yJy`Wtj4{8~gOd%8=e@GcdkR@rp7 zLf%iRxAouj^2;|}wr+fxCm#1Ml%QeFTxT@u%=JNRMaE|{S29;h?!|AcyKKuq^@!8i z6&>E7P~^@a<=4L38g0U*yC`b>G+$KPGudUAU1+{G{=jA-HPRBns{rOh)X}l)DmV$hE`Lp zqpB{xfGNS`9U15eMR!9uA*&4stVoI!=Mi`tcIIbKN^w7fbR{H`SQ)aiZ_`Ws_YU04 zANS|>?jn-{I)Z{2ba|IP#V691h$$vvCqZs8aP>D;QpAc{E z^rqA5w8|$xJ!>a__|f`EJU4#s_PH~9b}d65l@Tpvbu0VHE#!%zhhJ}>&fSUN^6VN}Ht+m;jg z1d9T@R}=sy9;CJ{c#mWwe;Pl=yhcv>`5m8M0taBns$XX7bMAk;`Bu{Xo8R1W7k}_Y zG!e_0H@`~-(W>!*foTg)`sAVo+q@p6*``MtWhZ)XKEa={^>2FC&zyS0-yXe> z47_zKzwPH`qoux6R&Ua%$)is`LTdJ2{i8?u*RDCTP0xI;-F0`<_#vlj@x9VNMNXW4 z`n&69#%Ct#28OcLQ^y{S*3Q1sZk=|`<#1289W^LFls}KvU93I{JPoFuAgToBh_H@I zDauzsmSIMrZWLvm`L^Jgebj0%Bu!I_QMW&_rLjvd=5sYcaydj1}8mG*h5NHwl1u+22x8$r#Civ48Fi#o_}iVNX=#+e?>@g%O-MN_^0X$qiu$+rzdf+ z-{2qB#b7jd@Q0N5D5eX@plDZ2jIL1ofj$}h(n2L=fI)~s5Ee9mA1Zo8^O3G_U*Uf|?H9nN{{!Hcgudba9v`CP#+TZ0}c zG^gcJuoHlRMtxJvgSm==lg5D4dNH9$6x|B1B$dmki{70MJEv{4d zZR~Ar&0TY}wURq@8q}`QvL=b- zCJC9{xzVOSB9$y9O4;FGh3tN!wYyQZj(Uw-xsoQRaQP1@-TlP*M9a&yjr8* zcha(pydK2(ZskwiN`AoqoGa(1FHet9syINVh#@n7~+hA0Y#fLJ_^ZvQjGvMOYU~$U>V?F}F^uo^sQyw||RE0wpw#Y#8 zY|=!#i=nvrd&TNi@ivXR6aa%uQm&%%to-V#-}Y3cE|ANt zma&Unp6a@L$XD+NWa|4~KV#lg3+G++&4}D;h+8D@Xw6dcj9Fz?8~NYy4`0_SJ?QnB zy&hCNAe&KB&*ycbl~i`qpecK$Q!3SKOflJKC))TAzHf1Tqj72+33x3`%}ZZE!;)_f zIOVcyA74ALa`Winc|kAB+^N%B(8`J|3!t>kQU3ln-<9qTInn(sY$0<)E|)bJvOL_W z*XWu%3`TQf0PzO%p3)+XWFp^<#su<tOQk@SZ3-ZiJ`)rc+JuxsN2oU~hE!1?!FNpG zC~xNIQEU{52YF8Y(UiZ7>xilW{#AZ0f9w0XFZdfrn$Ew}tWuq_>kD$@#~+g$Kfhr6 zW2ZHY@asd4>|N6yB29s#d6twD|&V{ zj?eO_7-`hJP{WxV)7rfqp&nP+&9C+GyQy zafKh=OkoDc2wjP(L-T8a;RY~yfki^S!&ELv%;rEqVh)%|0493?m7rwU2Ezp_C~#=y zujR5TU+x|LuUxq$0@jgx+gGI_M*goFBq@c7K@+i*bHvDmd{yxJetzVp8+n%3eEl`~ z`{CRsQh!74p&R+1lcpPQlI-SxbtCi1jW_*r_mU+SU9@D$ZsJg(^n3-MjhfBm*@}S8 zOb(GlW(&ff51GwTK3x$|BaAvtZsorwDt;lqfZWEk5>;-5sK%dwQ}Q4C_`~E5x{+yA&z>dFsXhEplxq2XSg@|728kd6=@UWHi|#fm-T)dgO~-~fwiwQ$`)o<2x}_pF z=In_^r#N%tl`WmsraBKJRhvxcd+w<-Rd?ppIK)?mtc(N&%jBW4@3&kM=i?5i(?MQO zT-qdE+16)p@)4DayyZ0X#t9jmUlB9J3?MP8#3$l ziQd+zr^>^zO{6OH~z6&UQ zw(OGfd8GZL;eMOVd^S0C+qwMtn~Zi_@92Jh-TJ+YE+8i!^!aKmW_yF{$ips~%oi?4 zVLfLSvBhfsCAhl(2v>I$v+7$;ZU83$I?*I9>aV5inw0kgT6m{UZp3~}dbySGUU$Ob zOug4+o7pD$+FhzrA7J$@@!YMZkWPA$)@MA@ov7EduLXK4BwyQ582FFWMNPk^NxrFpw1QlpymKGI0jHIm@a?;& z9?gRDnLY+B8CrORqT8lMI`RZ2ACPW-t8swxhgEI3)kDIq&bH{jImDU}`<>QVe&`G= z>N@ghu#tPt+f^m`+8Q$Sy5rXTb4NT6t85xcmr0qsRgHRA%tDR6@X#HCUw$L)(MrAg zW>)@$tu1oo(e}j-?gf9P_hq|R&^0If#3*aLQvULf^=`A5TVD}(A!f1{T6SN@5Crf zp^#Cu4xlZ=9gMhx{M*Dkoter9zT}6;KH+EchsViX-{i=ne>r?ty1xFrWK(0^L08s; z>NyA98IK1&X{!uwJ?mugU+rc)n&RX!w<9DYzD zL7KEmjvQ{;;c;h3Hr?pc9=Y3*^+_eZtYeIy_uPi{hvo|88UVR=Ve!!ce@+aHGlfj) zPE;G71rQ~2siqy}lyAv@fD{Z4i($T!5Xe;qPec-KFi2k^6dd-J5ZO+^XgLKksYoCe zn0#?b65#Mzd}TloE}%SQG>&gE+8z4w%?5+yGQGoLxQ8$s!~AcVvo`Td`Bx-gZyNuQ z|2NszjbrPXLp;AROe%6aHj#a#VqCRRVkZ?xF4-)-@~iCOud-RWRstk*#_>&hz2tJe z!)}yZW-yFx=Kl@ z^R&js49uYuN*2Rz_>cQ9(((%T!BhM&%FjGm@-!MLKZ`OnFO|H4hRTzaqP&x;D3nx4 z_TdnMt!LnR$0q(~pebkYKgQ8l2A#|cFM7Q2Dx6n%FPvX^FMOx)Uie@0r52j(Zo!(cnNooymb@?iorC=RO=B9zt5#(9rPk&f(#~SMahA4-TfVXBT$A zGBhOY$x1tAl5_d%( zfIi=U;Q<(W?sw1!y?<&-j>!KZeF`($1FezP3@sOCbOEJQjS^r=V~VNnMCg=7H_w2N zX=MUv%N?jfvzfsR}UAfL^oOz^-e|4C@Z>TmAe{d*w z|InGkWd3kXdBsD6%-kX7Yud4d??H7Y1$kU&0MjN<;u8E|Bp&Bq9~`WW#~&OXCNB>U zS5;O%Fg#r1#El;oZrp}YcrUE1C@Mcy!q!Sd+_ga9^2RpfWH{*MU!XZyyMURuJ7R*6J$bzK9NfFai#6)bbTv*GvA=C3WkWTupQLsRXUIwd02cuelu8~ zPCtU*u;TaV_FQaVQD2{>pF`P9eK?-Q@0DfF&&IPmVALpfT~yC0xLjk z&tE!CxY>t`ZdO}j$V-L*JV$YwT?&(tgnW!KB+G;(nG{~e$ro67NWuxPGRDaLUb$xM z`SFXZs|Po4mfwv#-n^OKtN6dVZBzbUaj@m^HhI&)DL*7zQz`!T{rhu>EPZu+_I_q; zta`sRc;x2(WwPF}U+tIvdHl2elBapw10^cv88TC`O8PetR$ihmVWG&-DdB-KQ=*bk zVx{1+GA1RUqxiBW12JnLnPQ${MwwCmvZ0}x;bBawbBBk|85%lgc$j&=dicLpzHdC$i+T{X)l2r__UIpDCK%18f8CH^31p5su&tFTpVXb zZ2`exgqfK!CXJcm*s0=$Q>Ne>z}dyFg5q{q#ruo5O_RTIObBK1Vr=nsihozNoPRxc z6pj1d;7j<|Nt`J`8?+ddy!cxFZ+sH3ujpTdFTaSMbvpVtch;*~c13sh@|x0{F8)P= zHed>RBsWD|$R%~UA}>S(Zf7jw953ZO=yK$Yjh|K;_Hd=xgLXjZ*W?aujXGW7u*(_k zFAaGdIG>zd>Trk95-CD{NUukakqd>dgE|=1A`QU*zue`RRRH6A}ESTVzwnZI6fCt|(a)jw6IU#~#CVBJT$14JT{> z&;KPDakAQDV8j&;nCikZy{!%87BekTbfS)5@9*YG)C^~*i*y$EfA^me6 z72}VKsB-gf3Lpx4k=mSxPRSdBVSn z#wSNf6p$|=PzZppH$?0R=>JN<)R)2yYsvD9UXymOSkYZGU}eU@?C&l`M=cEe_&M5ym3DF;Kf^XuE$}5Q8-T?GypwF{ zMKCF@H0t7a(HUI|Y>baZ1-|6wLHuyk;W?f!h23C14o@&{cSMW$((YKu%au;>C5ohf zg)e;x?(`3|eg6$%ueMj-R9dsVyLQ0F92u{r+%D{PpkEN(iV7Tw4nDZybs+#M6n23l zQQqPX2^`5C0Y^gNPvLCJk$`KrGekL(fWu4khYrN-BKky*goe1sa-^it`)D@eOg5I# zYD`y_+*ERJ$&YBA^5X)Aj~8q*8O_ia`3040RLlwf(L9?f<`Y7}(mZQ9hOhiHepAe! z{)^{G^9!HUef!@KZR5|7E#OM#3djsdw|MO@V<0~T3t1Kw*~i$KBHwXH-=y2;6z;mS`_rHXtA{i%t z4e6SyZi;yQp;p7gF$swIQ;i#KOUv>dNl5TJ+AvR|Q zy(=ogq%C>5H`ben6giWEJ^}=infcRO3%CWUe$HP4b*U0tJ~%hTCD8vmXOfu}5l#gav!9mwbkzYq<2?Xj3W2cZ`TJG`Os?Sh<(IdXL7(^Ixmt%~Ag5E2aI;~}S0 zW)*cnM|yx>k-o_8n4p-r$mvuop)bg{<5ar5wxmWv8rGWlIv(A+_!<;!(|{{!=kk}0 zkS$B6Od_eKUtpv3CPb)^O%ce@um>nu5Ts%{;%*N?(uLg~WI934dT=6Nmp_}oEQ~2Y zkMnCFjXi;Yx4@Ym^1Gcl;t9SVih=otT>^&wZ>Zd2N{zx8qNNrDrJIQgt2H*dSW~PM z{r??A3TR0*G2%rm%&m>r)Br)XfRRB|TOHA25YgF9K}0!{%M}_w9n;tug=+FoKm@gS zJLM6jcCfZd5M2mnVTbOHg8&faYX6iS6@%z&K%1Ehx&aV!~?A~C=a|u0! z*8xC28g_X???G*%aq-8=H=1(x-!t6uiCGf#M=GGGNU{_gQ2&c4dKLQXoBSIv->Bh+ zPCJ6(R=Or{2>lHT$5*0hB>o*=6RTY{<+Y1m)#$3@H)To#el8BB^=6nabcCWWLc?&x zqN3#%2QtP_gp{uO3a+gPHy~zdd5tRxEAo?##wEquSoxXcUas;cJuG41h)YNoeJ48-47Og%bYQ~s-^q@+IAuo# z9lHoc7ea5Pre6^|I&ch%;O*jH0XuqIWIp+OE^>Tb4R+MMqBee0xqU*OLH`ms;dk<( zwlIE?f{OnT^6WeK5Y?ghRf3*Y#D^#kqTH*95B(I5$d!3MRE70p+KX&a$-0tLP;vb- zR9wHa@AEL~JN;GK~I6qaM__x5L?D3d$9F9k5Bjpz9+w`Qk z-4i&eV5afIRK<$ItN9&@5xw^}{uzX>vVsw8z zi+&0dtpKF|v0@}zX_5>FiB<%Se?_AGf9^e6Da_@!__r~me@h~$pqe11_!~PGN&*|o zD@Fp8x|g$>S1#H^kwih_3rH3fhW$`H?6E@@<=_qoz^M`)KP~3>I7?%Waf-31-5x)w zfVu$~VjT1)QHL(&95M1>$mxQJb4ADnd5n?mai~pHP7un9-XOK;j)dUak>tPTge+Ei{vW`IE{yz6(63(;c@ba)QSr}VbdE2y z9P1+Js@c)j2igpIulrd;gKAFJ0E|Z?M|i{bfxKSGGDnH2ZO4yJ|4ESfw(1 zuC6XKBXOkB>28V`GA386F_w7TTTyX&dG!zK%g^YD&P_buKV|03xpRBFs?o4oM~uIF zzq+}5%G3w>NBMiN>8K$&27QqaEu9@rG-m3uC-EYutl{2c4yvxiqM_3ee zMSph9^d+fmCf;dp2*lI%$z}1n!86F-zbpDGQU64TRXSmGbe;OJc|@T>@`frDsBZ1<(#X_0hLx&a#ww2WXx$un zUGC&Rm+DEMIcr=XMgOv=Iphavowv)S${u&6mADv*R3Z6LW77Vyy1ZTCHY9Zljn1ue zXyi(Rk#)E%sS{XpsU?nnIC2@+o?_S8bVRuv- zE%?FBKDEzsoh_)gx*Ft5d`2r9U89wG2UF*^$y8@Z+OjpDN+s%jmWYyUl*pz0ei|}K z`lK3upVn{LPY`V_Ic$yEzA5WCmo*y{QYHPTcPllPibP{=Kq1i@6gu1rBb_xZEFl(; z2BW}u9c7yk_qg@9;Tq1{8!?*rOO5FFXRb~^QFWEVjnJzM zf(4%?K~%2m!7Kp?fikFNr@-ua>M8Iyq9Ti@Sx)9hzH*nL)pf3yC*HuR&2S z#`NRyCXeDAdWUpjnogbk7w8NYuPe-D;Ro_t!VTCsI%xz#(67@vWRxi41@Qd%Zg}Ey z;w>Z7RjyRVO6!8i7t#9dI2(1Zv>nx&dXKLZO?nXiG%Mq+8dOB2Z>uc2QAv}=RO%UR z=x#HxF^55;cY7>muF>NumzkwrW3)Wv&(sI{I($P-AvYW3LQ{iDm$DY|#ZpOWLah&| zm4UTQkv2;VwG`7ne-LTG)5a@ZF-gZHy;>45m@O)))!+%0$>kDt$l`Tv?_uPIgkHxwGy@uhZfR9U zl?#mqkmyPjZl_&S8EAb)_z_ zpvqI%+?OZoR&N#lx^}t4@i&3v!iNr8KL}9=FzDL)8<7Od~VvFdcW) zp+lD5s+7xWPu4O%y;kqBnhh672&>X6jjWR>(08)zRJ4dxR;i8lH98{%&zbzI%6sK+ zBaHi;l1riM{sMVx$SR@8Lvk-f3Y|W}6f&SlB_KvojTB)eO&Vd4{u>iOIx)#OoJ>o` zmskxZ6OHdi1C)%=o<@NGL?uiw&16HT?Nn4iyErk@N!l?DgGCXWq*5V5XFwIY{fZ}` zeKdu+kD*r<<`)EigGOhK*DWYp^WHf=Z_8+Tc=W-6@Zg&|sYWwEW|M1Q57#>*?s7}1 zY0I`MsiU{v%hfjrLL+8dwN`JI$|P0;=Z~X_sN#%A8{guetBdrkJL?kSJrku6WNKR# z*L-qgB0hb3oK@?T+EokAbS;lnoDkP_m~V5JM_I*|Wo53xo|f);T9xVgCPS){zrn6{ zMQzS%m1A$qU}MnjJUM$-ztN}ciutT@pQqWYzhz5Z#fAMIx6+E<^)d#WT&mTq3ykhx zh%oJ&QEQITsPrl@L0$K{%`4}ewBm98-N8LeNdq}`Eviy`gKn1*J?Jf7lirirxS8Mi zHM#}7#Xoy_moLWZ88BZY%gA5Y(-im1rX)L%D5SRd8|I#1;OusTlS7ken~`WWo<_`~ zS<;@m){PdW)#WzFE#}z?yIgmFd(A9H%4p5sk0OoCaP&MCC!d{9k!V2?f&qk%os9K$ zq)5nVN#k&(3=O_%pqR-RiNyes3{r`)gih0dG1A~xB1aP#r7(yb7YyPI&J4C_q&dV2 z6b=Vi3SXd*!gurq?=q_LQ}{t6K>W3eTQ>Vc{=7fk#{c`%Ji05En{P9{I}= z$CjvmsNuuv#87B7X^@#zdds?L4f8BonZ;mpHpRnD+V{(6mi90FsB~6&<*ewR2NTuF z=^?w!s4`e(TFbnKdYmj}EJl?{)=?X6)Ydc(vyqd?pZVWyIc=o0eXuWc{yXPEPrDN- zvS0A%2L2bdtYt^?4T+ThoQvwoS4y=*=bDvtgT=61S;%kP&+z*U3^|G6FGc^e^{D^G z-^)gACDrGXAL#iF?K=r2}*Q!@^HoqL38u0a&m7P4jXPrUH*z{IS zZ$oyf#;SGPHNCBE`oGbdhjkhh3&K9VW!uo`Np{`C!KtzGUSDftlR~qb%nh!uHKOWG zuyJQ%f!Qck#O&KY{q$zCi&;Us`6qJM^QTwDm#u3EH&&LBAEET}ysW4CPW@^IO<&7T zN&i&Jd}9e}ne3t|ehdlAnUsxsdq7wMD=o$(I-Ktm+Gah==q& zesEV)ytK6?o@hLclI2onJxTM2a#!+On5OQY+S=ZpI{s68ZAa3BZhEXnEo-ko%}Yjm zx|4N1y|u?YLkNOn+VKcPFHCxy$H7K;8a#~fB&ao;Mwxj5ss#kn^Xd1#>xmA(Ma)J! zSLqDbXi=(nep}ldNn3lyf7aA}Pb88}#CYeOf9qzH^QJ|5OmM%zHLSPa`DHhwnl}*cv}siwtJf%X39cs1*o>@^ zT#+r+gUg^oR3z#T<_WY8Cg`)dtNAUG4N^HUU*$`pg0~(y23B8n%ELdJv90QCy!@FT zFMLPexY%oPP+r~M>Yu(}{L>L;JvQp`4(HmQ=e*jf;BL;3m_Nct5Lc?(8HQlv+rio&tD znWRN-s>NsvZ=5#!OqX73GFl$lz3t8^hIzv_Rv`_{zAR!jDB|W?x3|qBmul@ZavWt3 zl!F}4Ils1LnncDB+;Xa)_$usfqlIYwv+>Y2Fi=$2ASTq1O-|_*ILR>&oIovVWSbr< zut>3hIZoy?&&qnY)IgX#j536?*zl3f-!`g zswu~#s#>&vUM`N-HmMDy7I2E>W7;sUI z4vEZ@LIXb0qIrR#&OOS{q5}={`VR8+@5$qDkKVn!JKD!dT#KiSE^i9hb&l7F^vIF5 zefH~Db=1UK(QB}?x~`phqGzq8{lcb?cLK|g4xFgTQV}7Hu)y*1hv>KqKA!?l0Bt5`{-n6%kG5R3vVM7V z4r_6;BatPUPFYV)dg}Pn!HZUAmquo>X1hUdYX~jObV_?`vOUadi{7MO^T@VPmyfkA z@7ezBndajqBy+Y_gVGm_l{K1Wj3kliuC39i3wvNSN#YG%b^JokFuhrAF@!X&CvXOV z3HoXBN(m!04jV-5azlro3r>$ zx78XwDcZ8b#l>?=@wAyK9hWWdJFC&9H`$Cbfe4b7OCqybvrRuQy0W1T=cIePFJE)| zaZ+$m>p;2LN&*#V@ck*CHH9*kXn4YyS(BMTFVl=j3j-VkAVp8DPfe>1n2)YkS>%h5 z8y3JMpsQ??lR#v6v>6sKogZ#t21uh}afGFU+&5F-c45fV#a*(Me+WTYnoSK26&4-| z=_9`&zp3T#W;SNxwaGeD?s5k|BW>i?5c^h*-ue-fDl6-#OGKN;|6U!dm|B&n=q08hVG@_dVtvwVHy8>L9VRWVv2n)z+7(?9A+_ zA4tcuLvZG>ram-Y|ZnPGN>PBwNhK>FKyB(mCzcO zGT-~b?4}JntOjG^?5PbWMh$xFS2n#_t=3k1V=WHIJtFbXYiU_Q&RiGWXAh6wd1>|wk~P8`rMI$5tu27UKOAeH&3`Zy==HGHXk%Hxswb;x<8E`f+@_aF zRF&xt$_kZ7FG0(cW;k*apb*x|lngDsViBbur3bt}rf`7dSA8J#^D0jv5WZkUP^ySC zMIce0J~%oFk(BJ;7zas7B?WLK>b&ofUBsH>+2viB!6=8=GJ`WPpE@t^uOIL@W;sI( zoW?+T!k6{f2CR)U!mb`Xq0DIyRm`n4vE1Idm2>3H(3KDI&-q(@6ff~*6%D(u?SF|3 z-!b+|-KsQeA+SC57sL3*7ivTxwn;C^1Ekg{xnN}Z?JGVj;cm$e2 zt5!A>#YhoTQSRTK5Jy*W&CfKUb1AQXSsxt5|hSl znBUyB_B2wGdv01)SF!hb^0kDlnzdn7r_LZ#%M`4>yMIMb+YLLI_R+>zdL)xLi!tfU&WugZS{NDa z_v5!1O@wd(75@)Ek!sZCW{7sVQfFhGN}XIQ$Kswur-1{&9_BR0_BeQ0k>7^e`77aG zMalYOIeziL#PX(nUhYo}AFaNl5Gr$0+GA-huha-jd{_??w127%Q;(@AHNw;_Bl1aH zCM6EG@I66A6i2T}T2AW8hd=F1W#{(PHOwTD;cVR~Oh;2$WjN#HjAlaCnw3gdU71nk zXVLpd8`0WEIExmm2&{(Fa(1W2rZKVP;vTRXt3jWQCz~~Pqlr;j7G=q<6&<~cmUnb5 zKVFMw{?s!wThlY6;V~JO2eF#P+Nwe|4i@Wrf77AEmObiL+BK@M*W}STwF!mVlX6*o z1l7Z38d`GMFHtL}2F)6chUID+lxT<}RT{#LI~Fc$?^(R08~moAYdeJ$j%7t0`MXqY z%#2B*(WZj}xmu*PlvRJHx=krtq;ZQ1cR>rARJVUmo_H%#ne*zD^7`&8K32+dQ8126gwtlfKJSwZbi%_beYDg>phNa;SrpL=B8 z%NJLZ!&+UtO3JEOPGi{Ck(pLDn;mGDOB5#3)i|0?WM|arZE1&zHN!C+u}X+dPnB<- zjvDKeG|Hfn%Srz+jcj=x41A=(D8NWWrXn`d3xS4|W98XQfn#9Nm~w0|b`hO&s~78O z^aITM4Aot!=5maYB2w+h53TI9D`A|dH>E5}t--3V3p=wOgOypSVwDV6ww}+W^+2L7 z5$;~mTsiR0CauO6jH54x&TKQ3zwcsndsgbu;wgd7hLnmw`0`lOr9ef=8Y>et8&#I| zF}=)cFss9Usm!A1`?N}xM%os@I+2QX7}TeCCwpH*?KCpg3ZP+_h6<)9ba4(mhkV zl);)w|`)uw|Q#{d19*+j%R`)H?^g>f2g8I|NTlxL#UPzJYncktav)t%385Bl- zx}`T#u}G-_HMLx%y`j=?bX_BhOu<&V3=gyM5z3F)4jcnc2bfb+sTCdVNp~mCBi?=6q z`B=zf$ zY@9oTv)z8Wb>ACj?IXeYt|vbz9bCPB`i!$K*gJA?^S0p`n@^rTxNXyn;ZwGaklSiU zuUch^u%7tJYi3rzZ<$_Uw|T6Qg;tHeY+iY&26e|Jta(-yS5+?8WV_)v(&)-6%jcFG zU@yoqg#z+>FokMiGA}}%f`6hZhLfZq?2*o`3& zr&mgjH5$}Al!28CW<{dja|!w~t-XW4NP|^pOQ11^!tZdUDYT}+>gE&3AD&%4_YRIV zMAtO+pBI5Q>}B{j?CoWttQ{W|TFY>C<#5}QGc7Eeh({dqd-$hvKj8093`VIEnOIht z`QA+<50Zv=2FFt-1;AMAL$y&>t48(C6#wS3p#W?1mp6xPdh)1}MP#_kV=-vdT5HVH z{o56&C+w>0Z**b$u}9BWIpqsrX_Z5(L>Lwp>7+D_6$Ul)o$ExDHhGGrgxZhvlgOG; zqDoK}>6&Za*9t$ZKte@OEWjyzJMV{7IeTZLKgywBPWv6d=kWX$hp{eaC3Z6Qx7;bK zubFxFn>M2@eZ`E?i=!%;QDd--|J2#wuB%mP6%svn)CTveRj1LojOF&>UXGRY4)j=9 zmDF~$#GzBRw7MJ6MOVl5G6DXJgL@jZy{DZotokFrRNfpQDZ?Ei^8I{W5s56%;+(h$@x(uR{P+zG&>xa>sKE=Np+=s z1B!L^!cVgmYizd&b6rZOx=>rOMUHB4URl&DMPCBym(E}@EDyU0jUkh_*@`(f+?L{G zFD$u1rw|EGNi7f+++92f=9i~3y>^W17h=0*yE2v~nSm`jjZwFLBvs{1`i3{& zAWh5&IQ<6sBZ=h>sZ&-x7mT3xmx?Y4!<)nBOyhB(Q8?B8s9oC zyNFAlKcLmLrmL#A45U+4>4A+*>HdorH_shS-hP^`kH~LZdiSN<+-om9sf(-Xm@)Sx z!~BgaPF~RX{2XWJ`b@TemZiFV!+^cgYRowLH@l6-P5lysS)2A)d&}Dv%WV4j3IRYDJ-^ zDP<&-Yh#!4@v_`g{1Sd~?rAvx%NaeHN)C`mJ#|Melqv_VDh-ChMoFokiCu!wh{Wuw zL>yq}M(P;JDZ_VM*x*-(PS1oo5WWB4P^*-&8w{qd#Wv{OuPiCu_va69^fmiP6_X%I z^yGSL$~70In8#$d4)|)L9n*Rf8a^=U)!QlpI;?njEzi_1N&DJ~D}YWSq^Tv7Co?3Y zaCMP<%0C2i_w$SR1-bpuVo;=$^!MF9f^|nM&RAJB6#Fs@tI=p3^i9Qf_b10M`pdPK zJ~KMXRdBu*SJqwc?_Wgz9xX3-PqP}575w9=czI_gUfw?bVQs9UuQFcMM=o9&z3r1v zZVUB9NC(qTdiA#P4>SO?L@PI{a&z5c?AE) z%)N7~d;!j;S5}l&&5K)D?rV!tZ|NJd>9wn;Fng9x>$suZbXL4_csO1=y&-u@$J(Xx zLH?Jy7va`u@!#_4r?*T5TLe)9xjxW#Ode?@R=J6ZdhB9x* zrGtZOH}_W$nHcS=L0^+jrI3TesF{ejyq$BHjgnP#hgK8uB$^x!y=}!DUjs{*J?{A8 z<6Fg54`!Fd@GJh@-0ELnS#{p%^z)y%dSl<}D(3nSu)|!qp?}Jva`L)V!c@<(s1-7l zcjHu+>L!kVMaE)VX{~6svA&IF>KyeSJr_{{qbR0D`=)iFB^Z1Kf=3K11MrJV;RdC` zmWpL88Wc|@^CVL$vdHq{A|lMdP=4_+1YeGze2V@aI#bcH>?=Ns#=a-y9$@6;Gx8ZD z=P%*^{lNSD^JkquC5Eolq;u@wO-k-cOsa-jO=4a&ih}c&noDN}Bv?ydFnhrWC)Kj{ z+HiaORE|ZjukPEi_QAkh8;w|^rn??r|Jm)#HOxJ^1-WJF1y|0iu193`thth3&$)j% z(YA7P?goN2ARB9q^gGlD_z0NdGOvTRO~H!41r>|nqtkjfdG#oyjNYcSCEz5+Qm9(D z2K}HeEx8rF3?>=bw~$6DhNnMv28sStGkO~2$waABSaOb19+hWP6mmjzORV<($btV%ugjEG%ft8u z`i$r(v$-zVR9DI$7zpZ=>pb4Fbf-rUTGXCC3svv}C@9y2QJ);^Vqs2K#3qCcw6lqA zn3_elb0#l;pl$&XF*+-#6rzzzvvUt0V@r?SKUCjx=EkA+E{P|@8kg6mq80oKCfRBV z*QOJZc6L0Ms;r!9uv8(9)1p)&Dr{vbRp>)K(fl$(-j4R1e$S%5&6^gf?_}u>j~4Iv(uDUo4doTAL{5v{K((al0Khrj0Wt`5DQG}e zI4v*(5KUZwYaNtQg$QT`zDs{3jG!cL@;E}nV6zscf+8lK#B4nC=4ytaJs#gAZ}shH zR&MTHGBvX#^2-JpbAW$&FS-P&sxKUME^HWF)H==LMvH}T%Z3`s)eCEDy!GZOdol_l z^)_A5&oGG-PEGVD>PrJn**SE^h9&QuW%b6Vv?VTJbm|dP#Pq#Oj&7YbRK9k{ zEj#B9-jn&z(&?#OP&Itc>e~)>4Ycm=n!RMvi>IS!?4G~P{`WMq*&tCP=2O!+wZASI z@$@k=oz?DK$jFR_os7D}9!h0;r#5P23{gu=YStiQ7Rt-bT4)pnwkJjnpoaf{95sN^ z2~F@j>Y=0!YAC1smK#$Po&$9>Ff+1rTl1nD#y_0bKXdQG-uV|vZmtMNo694mjZ{#se+uyd3a}8w2Ry)RCR+qq zN#g^~S(QFU$9C(<1Qr z3%^5!5?6`}YB(NF=tP4jr(iBTJP?W_8EEl9ytp4+IL|2ZX0M=QnDZG*LiRvXJRyI4 z!UO>QWY0jkuZ7O_IKLR`{^N;}zmOe-@ZOG^^sKJh?8xS8A}DJH7xXHEPUKb4?^sO;lKGLQ9R_L}npr zUeejKctv~rGI^N)b?z1Zd4G%V<_|x-xpY~1`cLP-N(R0+c6~$7Xk$(9C>d7D;F;5C zjqVyOV5m_2pL<*;;dEgAS`?c#=@gvOVA1HkIz1vAQJ+>SfyY?m^@Uju>W;=7tUtJ9 zSzFhlB^@YoUx1t!x?I6^E|bH*OCuM)BT|K8PSnuA61+l`A49ne5V92eH6c0=>6V%60O6-AKgO%x&6gT#`{ko`SRv)YDQwIt$Iin-`#NW zQf3t+shwdpyBrPHeuvg*F3V_pK1gz`%J5!K4b*8fZA@7p54ACC;piQ~dgi%EN&&x3 z3lSYgxm1DDK_cAHfH^V? zM<9cP^wLsP>KDEtw=y$*PU)@WSyJ*YdGd`k|G~@rL4Nm{CzRJ22W4s|T^Xw^_4ir~ z{PbYFK2gzE+m|~{sm@}{#x=%Smqe>jM^wt65bG!-k=OA*Ke7R?aAz$W_pomH2gfKa z^@?nZgn6F?Iz|l7lIn#DztmK_I`CJbdyYSc|2jR-baB%OaWAXsLu6sHtG#kySJTE+ z;(B9OqNa1G(OADhAGB0=4K>CCXn{1I2$%0q1ocx>R%`j{cbG3r=O&t;-PLwkN7bg* z9D6|}RZJ8LZFFjw$le{;*R4|NxBq#@aMjJLd@B-YGkDQarRqlc!-&>A2@T1wMM~Y% zCC``qw&bmnKb3rju@PLXArLv==!*8dl1piChFCNlQv}8A1Bz{Yp!CjU>L5L7uqr+z zlbm3s&`m7D;se#9MHQUNPz(iKmleb*r=WtD##Kqu_ zNoO3wNr4s0&wz&%om|u>PWujEn#Jg^QMCrvP$q?|%Tjrd1vSSN3M<60>NM8qHS9&L z|5J22n*j|;7xG&EnV##LvcKft>^w`VFxm~eQ#4xXm1dWTy^fIY>n(PZ>}s~TMxh%W zoJq8OBh`y*YnIg1pTrMN8`UUkT6EXQ^$xS)dxVVtd#FDaor>yjFvFvhq2M= zAIyeZ3@WYBXgjlG>YCc|PgFLe3F;k+G!Lp-sZC?FlBP9Xb#pjLdZZ-Xoi*N1Tb)ZYLK=Fu#pXbV-P!!D!+*ywkah_I*Tcb|0dOKrdjht=zE&bU8OaAcrA7-6E`ww^SeBq;y zmOfbDf6H{675tX5*VWvn@euxpGMNNUoB^d@MVg@|8|+;fmtL*68v?y;(>l}!yuAZ|GerN-A(MhpKBbI`S)HA|=LCT7;w zv;+#kh2be`p!H&)^#-7IDVk!QQ?dt*Z65_ze_!&KlCMgR7NPZ6u)-Bt1k~6hP>~5b z0aGA&0;O0e24~0vvk0P?3^M9G7x^=_T4chCGVo*dO40g_2KgJ|2&2mvXnFzZsVG4R zfq;5Wu+AqTii4D&MX0pJ-hvzu+f(DxbfySKKClYSL_o5DX&@Oe7VjaFS1$ox5xZwF z>ghTir&mg+P0M|R)@(n@{jqzQh}$(XMtZfyWoGYC$R5@kZASUyp*Fd0WOz0)cML9G zx@2knLSmRcII5GkhHjJV?Iy$ZGUeePsbr3(d+QR(2B++7ZKCe>P6y+$`lGp1yn$$c zm{rL6ABCsJtj3TjY~wFBD5lhAhS2cMqA_xkwpFd^6RfOE>eNYRA(YmpV*Dr6_pM3^ z!1oQ+N16=$i!pDrnX9q8bh_LX{((}7T;lKZH|T>lGny#%&)8+QmtWlNnWB^N4?++V4N_D-Mknk6 zIa+7xrDQ9z#j4U)PQ%(+rCdF?t_&7DE62|Q>#QbN**bvO2C2c`nRV%N=cp78{R>Jd zdE64ve8HGBw?3w{8T5hmyX9K7UHMqed1&QD5q-V<5%6EJ^yWX~xYSzt@3CBfxdn=> zz+}MzC-^PGWk{VEAttDKbm@YkDzEbi07Kpj&{>>7QRIV4Qp9--7fH106XDTdmD0_f z{IB_4cTKsfGjoW4sVUDcPyIiXy$4_$ybXxu!_Pik z-KfzNzjgRue;jxq-Ew@g8M145!t8QK1t{u;`L~=-tweE-)@sy_e632h=I~Bp+00or zbu(wwpO{f!Hv?~qn6&|B>%pbBEAM)|82uhyii7_X#1n9LPLG1B8W&Hv799eoSSUbf zXxwgD##Wo)p~*=R5HsSMU;^f4IlEM{xWHrr zGLX>yjKZe*sf6x#wg~C3J{Bssz9Pw&`}8)&eMXyJeU{x}vhh zZJYj+d2=3|wsafP^;On&)!B{L%5)~Z=5~T*gg>FKR8g}6wWrgav&%~St50ao{YwAL zVDr{VnRExCXe!KfBB};C%~~79sU}qv)`!eydrP9G+X~f#M^;q!TP;(o7wp4nFlwXD zXN&RG5(%{IKYDn7gM|HZK4J5Ss?KSR+sq&*G`w-jvF?*-#_Cz!^J;t%gT`jJ*}CT* zSh)Nm;okc@j&<`qtV#%rwS-5Vj4auiiPG}2uAa)~6J;gp=#KVjm-{Lo>8+UDT}~>a z<=qJwd{8epI*mG$J83WU1uLd{bTvEu27`ZzNEdRTiyy(Hm%Y%k84qUv9vkc1YU_LH zLa)b~LedxbRc`=I^DT0qK}q0V2a?JF%lHCvF@b=VdpzVoNt)kBR`)vB@4YZaXEuz6-#$MVsn z@ThuaN30K867Yc4fIVTU1gyejySi)7o;;RB27}T}71~lxyWXfalQvr#O5x3Fy-BzE z%VlT@+Va9vv`j`n^cwhS-<`SQpPPAJ2g8}|5Dj^m4Ou6f^gPUD^w~>rvrDtZU0qaV z3>&nBFW+#ysBExx)^V4oZE=x^I4&ouZ^*k3m;R0+4h&fga-2qpjpG~}eMefS#uytI zah$QS4`1SIz@cb&!0UHqwV9{K`hfsTO2JJr#}{K3iv{e3lFlP#SB-V_#<(6weF9s$ z6VZ>4li2zinjDgFJU1yk{BA|n+1Az@E6d&%9!|GP+}%x4TSY{JW;`V#`gVT>mg4m< zKAK#(Xwm%2tzUO^PEpESCHkL=sD21YVgx*%lS7FXg{mMhtDn{JlUk!CT1$t)B=_(# z7*S_@bh}w+v3JBOdu0Z&bGObU-vXR2o9FyW}U-T?ex$l6-ag0 z&p#=ZD|FxjE&lj9USw_B=B#$}B5T`h37aI=ErWA1KL)9O)w6XPO~aMXe*E!*2QsCL z5)M(S+pHy)=NXc=JL(e9E>@ihrMrxX*Z4JItHof_*+XufK_*Ag+2z=vTWu{VuIJ>O zMeQ0sW+^U-M=cun;1E<}fEK5KG+u+m#jTL+as-oxNDeSy5XA9B;s8Ak;$FlGkb$&p z5~+%nL7(*cvqU~ay7TI(g1T9f_rZ!__VPGDkQ?$((JpDkf6r%kXDA?sFLTY2L( zNz$xWn&?7X+HNB-d(sxW#Kt<4_R@bXLmBke7c&>gAiwl1VuW7dx|RR9ga;Iw2nX=- zJ>GP%!>%*i5@~2KY$U9}y2WKft+P+K-K|3s>)K%5PzR3TZQvMwV4QU^zmA~?v#uA* z+%Yx-UFky*;1wn{!x?icW>J&GEEIz}iEjsm0zOh5tQ)@NW4J}J{p#loB!_9;n zQm`nDSX6t>nNwF+AdFWkZcW)7P(h~RNvAboH{)6mi>Y}}qo{Pd61DkVK2hiPU$to2 zM&XA)tlL$OrL>u_CW~g5bu4x|3>v#K%9ofUH617s;#G1GKuV@yx(2T(Wir*A)DLZH zRelYO%2ivm&bA#Pkt;obbr19=+n@IX+>>lfV2|VcyYhM>FHRtYad|z#5aa)EW*CLB z2Lv>ZLkM(X22G2F=D8oorL%~A(8dx0Kq(s>nHX2vAVb6>xl1&9mu`#(SnBC+;p>}3 zX??1^?1V5n<$lPMu;pk$>l`NqSnTaFn!9J#)^vPvX6pQ3EgO1aUeg|=?yj!xO&JX* zC|C|^ZpFIU)nD6qpggob+CF1;%aqj_wQsW5J5+U*yx3e-oML=%1*21z%V{@o^LlugNc9=I6&tNmmOjIv8>XBr&=ki5mKo6j}W45H- zsWYlAq#a~7P=-dwo2s|gJcKesv%2QbPx$;^Pi$b%qNSUJ+pev=7zVX#VDV-J!TsR$ zg^^hm)gA3t`=mI!LZ#Q53lr540)PbupF z9g8)|=qHf%^<$45DKHC93twC+WdALEoO<}LC872ooZ3GBRb+a8;}%4yr_Oa`n&wTV1w8CiK2=SiEt}#er&sK0yIlBWJA}2L z?z;Tp&8HsOxM0&_*^=rR3lNdL-C;Bp+pHO0>sknk2QybO)59#H+LKNIx@KO`s@dpKrjW3)YHUmDRC>1LfZw;pWjFCBTxU?Q%Cn- zytFH6G40eYg0Hv4MOcJ3uZwM2B$a$f$6c3!_ATxjlqrO;qeoC*PDvFa0Xc|dQTWfGz^s9_U-SpzF`PvG%N(t?p+-5qq z>*D-^ONH$fhaNc7x9y?j!motUqq9#g3Q~-mLWILrwlFxay?DCT7)%!h{koN8!<={^ zGHpsrEc<{KqF_psu@KS|G)QilYjtYW);U%=(tx%lwQ8H$q=1G}fV`3O&u~TbIq<@s zg*1{UA^AoWd6HxlUm;8mQuEu~hQsav(2OWgK`cs`2#s-C0&^PRerypl#%$2)QT#3; zg@}0Rv`iSs=yA~(I5EO7%o`9UYFg+I1}KF;1~fI`7D~1zb{QrmA&muihATR=Yv{4= zX@8tH^*8M1n;(u>D&S=)Ad?@e7j|uLYrBJ|n_#)x6 znbVsSFlL!o+mr!&*^W}b&R!p%9Sjw9e|?!|c$%^fEl21Say=JO^)HdJUY#)s;e4Vh zQtC;h{0-Id>*ICT#bOoK`eCP2ZPV&Fa1zFn$fqOkii8VG!M~hrSG!o7Rc8TnhFx;@ zszZ@TwBW$jHCuX~7B+4?VN_{64kk3KyKkUq+D{UOn@iVj-MOypCQep)c42I3NAL6y z)XqbXGP%k)&{g5Elou{Yq-t+(L_aM~F1X|y6xbr1zxm>Md`2788?}iFnZhiO&5rw% zkf}Rs7H_o9FD^0%H5w>avZ|Q!1RpX8*^*>YVorZ8@&=%lmx|T;jF18R+w(7Qee^eZ z2lIZE_gLP~MR)xFA_xHZ*h*LGwFY7l7&T`SGZ-VHsLjc0P>*a);ji|Ji0t3gnJT)$pF|7@#RRwB=j!9b5OSO;=Mk`f{o~V-> zy$;D(D4(NkE0)Gi#>!8|o4XopUPQg42;@gY(OA-lN)vuRbPfx|g-LN~D^}x;xI#gn zqj2Gg)BDPbOJe)4Ke2w{hie)`J)tQ_+`K~Na@Ph*_7_Gg?n#;*$LR7VKJAHK9b#ol zl|Bqs$?1KOZ_;Xf3tIj9RPj z>}+47&kQ~UyKdK_xwW3EA8j)P^Z`vk?I_yl?>IJVR_UsG|N6p)!O+%w&?0VTJQ7b8 zr)RuUSy>dVu8tN~;rE)zZdh~}{T>r^(mHkiC+q<_3)SCSAT)UIVk^WPefHi2Adi9SUtW6VIel*9>GJmF;Mz=VB)7DX!(pPD^Tv&ch3 z^QU4M3sh+0fevVyt+lN=eq$_tI?>cR$tH)|PC~7=SLXXmp}+>(;OV>jhNeRM%|^8J z8yM=dv=*Yslh%Cm(2|#a)E>^?)Wqj^bDYYljoRCLH`VF2#gU75UOBUf-W4txuCLkI z+MQlrUtT|}gY?0h~WZX&r93>}{Z$dpmcZ<#-z zAxMrbTRGrCPQIa_rN5z~Hd>~2Y^(u4TKIhna8tZe=B#o7 zxe`rHgs;i(ta8XIGxCnaT3)L&1)bI`PIjg_NDfg$CQ(utX8+Cn8%iAJfj566=sQtz zVOLHun2=a!=|$OdG^;lQ6x z2_OFXV5_EnN{A#9{HTdyf6@L0!h-iVROF!lC6COZGF9!$505(S{{qN zf(i=@_C*f9u{IuAVOP7fMRr@vGTU6$c4b>z(7kBh7jxSuKP24!!SYuYM$qkTj9dXF zy=}G2E*@@1!|_R_{zO|zZK|WhpX?|G%i#)PFLyojAMnHMhgBC0nG@NGAf&E%p%8(K z!@dWUU_Xc;f_}UJXR#w9&KAYeeF8gAaD!>Wn5$AfIU_*m=@RQ7)f|l_bo(# ziw0K^0%qW62{}1gAe}73U8Yu+?O6P0VZ~=(2+RJwa91*6zHM{M)r}@z1F(vo+N)MG zjKaKl=WWZk-Mr}j+m38nM#H(3%9;$!GRiNGIAK4FvEj;&?PpBvIa*)ju@-0;TE}V( zw8GR{m|veEjgT8fswzv3QB%~_W_PqXBIc;6wDO~ij(<1|QO=G`VY^--Gic;$hE+!F zu41tD^w8;-)-HT{(cMCyaB$W7sna&DLA%iXXI@05CFvEJgRc%%7cX_FotgqiY~GYh zU!EM!uZvx~ZILJFs#|dBk7TXhaP1qrfqh{1sa#yu_BUL@%sL~yz} z*K*JuL8Sl-A>wLb5krr{Ngz0m$U({JGcPFz08SMecT`qZUY|Rhv0zB5d|* zbP5#$Y8X7f3fAH4#hm||3$2}!`aHeFC@w+|*Ei*gOmbMtv5JT|2`zwMYz>HEO3?@$ z<0bCdY^xv_UJwQQ`Tk{=AS_ zT?+9ghUPRSMvblPmXe@XVRQwJa^x|W1dQM{F0)Q}SX|>e3+MSu)$Nd=BzXm3K?4s9 z9tcQ!2FFu)%re5>Tp&ZRO@R8a)8N83&?bfn00Agcq;|VQ_V(7DyXk|awn zr=j>z-=MW>TFtH{k`JPmveO>x3f;zh0-BNow;r+_yyVx28q0q5`O08v4ZBRT35vvx7vN%WPFGIT#3#E9^Ue?ijXs{)1{rEIyV^0(xQJ z^TVr81uRf4?)wq$J*Q!K|{S5 z@w!=EqK`S8HBRWqqc*kn>&wG&h03gAC^zc(38kunnneA|Cw_OU@Td2dJoe&n&F;aP zl@*>^4|Ke%%=fw-nKy^0_OR;pkyKmKmT}5Mp@hd_^=I1T&N5rFB^@E0Ju;4!2Z|b& zzgF(>cw%viVw#%EmQ)Vzs{IK07XSVoIn2JXXp96z{gFxSUTIel293h7%Lqyy{Mkm0 z#p>4RX@f#zAQX<3?mRZ{CSf6WfU!ewl-c0PJ1b6}cqQ*`Ad#FE6)uM6P>??fOu|T1 zAfX9I%#FBPs@TvSJLiju9O(eAU@$!{@0PV0Fmn``yLub_5*oL0`uRk^% zS^>@Y3N1E9&2SopxwKBJ@#ypC(F@rUt-U6ANxoK{Si5D^UWUCY{!j|NlMeXP8Gk78 z&$Qo{Nrb|2)CG;x8J08}51qX8$3NQFk^hs$#Rnh%@cgGfQ(?>m)0xe>0%vUbF@@Wp z_4RB@+sl1Qt5)q%sq^P;i)pk;Qf~Oz*ZqW-eN~Y<-o{W-|)-Fya5WmA!uPKugoW z;>MPN!KQ}6fu=k;DmwpX_M8mmxuBUMs5a472ZUpwiR&^+x zsga3HgHsm(c$n?5YkkZ_lene^{Ir1#6ifqUf`|@-X9-EG{V{lpf_EZoO?1eL`%2N7 zoE?G1G^E0SD^Nn0m|yy>Z*qHp){yqd07BX4h-BI9Y+L7b_X%eo-Xr|=2TDTG_hsGO zE8g6`@#9w)3l)BcD&Tfls|QMo0?iEc`(pUMsZXC-vUORfFo32Y^Ir63_WlBX`anm0 z*SbsEgue*)>oRD`gXCLwvybLz-tDuheP(;M!RIXP_S;H&vp@9m-i*uYPt!lvL+2fm zfX*6fP#{LNex3qI9P-zpmpQ8_lfm&a8F&$70mxq>We{{HDXKbDSX0pSRHky%6$hGk z&0%KW^w-CqytTjm;+nhk@d};UsMC@9fS#34`f=;Ly23?wj@ozpeihp7$UaeM>ut`r z@<#6NTQ^-w{?(mkRRj_By%{Vb6l~lpObz!JdOTAKb{2Gp-9@d%=%3~gG(7Y0S6Dre z<{2^#!7KxcH!OiLp^UU^dz41#prGnfYe~YStl@M>rK(bax`T{Q6v(r16#Wb6a)<|? z^+VRo3Shb&V(up{HD^Erd3|EIMzk3dSpAE(5Cm?p@P>>{5Xi77%86_&da(-0uumyP+W8d;{>fAO*QLGTlt{2YT^Y=^V z4xhcI<(WhFCNCeblMkT8?whx56TbdhyKxDi{2O7}QxE>8$L(z_G*=+)^B)r3=YD?W zMH{cY>7q4ziL!&Pwzlg=dr0Eh)93|Vupkt339ps<{INK0J}*;MYg7ULdXvp;H(siP zPMyjb&`w*cy)vm%B<+?*cvVAn>ur#NuWxL1A*cg zv~>yO@AQTyXN;Nhit748RcW-iw9IQoP#Wj=L3*9b>U74TDsBGxF#Ce+2n>$G^0NcC zHiQ@rusdG#VP9XK30Tta1=fNhYCfkJQ{J%+Kn{uJ1jt)3>F|h$#Kd88-;#|lK#o$| zXjW*+3h2?%5%dTYoXj}1F#Ghtk1_)X;WX>#?q`QgPN!+{V*EP9?w?B*cBD_I zJ33RRQ=Oytr4s%?A`$TB-r?zgem>1Egozwjdtg$AHl|qW;bw05iSPr#lS&(4K49%Z z0)0V|f>;xYp)xTK^IL+m^y4n@ohmDdK9OvDEimw#Q(Id#y&|O}!COzdE@x>tB3a zxz^(KT2`;duWOaZ7ZZP2c~a=h^iD3jD+9@w*~uADl%Vy)SYcunT8mEkO3BmcaYe}F zFHs6nMG*YOXx;f=u@A`zsN2mzL%TV7OK<}v(4pX}#%v+G0_79%}59%dhk`iVq|qgdIoDMi#&CTp4OET%S@ zuBCgIUcQRHY}MRv)+K|)M?o+OvesadPCCsiT*VfnR=dLN+@s{cERD(4YI#g)5Y!cs zNJV)xT0VMBd1XPQq9Q`Hcv}3>c5C(WqI9CmIJjg=-;}3vkg&$)lvkiP=0YH!;XMZFBzamLs*fJ(88iC;aerQK2u7bptLSl;@EGj`B zW{_9smBqY8Sx>R&gHJJWf4cvH4{y5Z!=GI78CLwC2|q`p;e*{%_o9D554Y?qw89tK z-P*A|EGUs65Z4xY9)H|b@{-d?K529$EN@F0#DVHHxsJ5*MghquDmDmUd8@iFtR|N$V?MV zEOAYY7i_gKb_yF;AVl=0uqf8WeSxOn!E@Rj<%9-i5xhJTCCT?$SLi}!G&Vl)DaMUA zEZQOC--Pc*{9)|Z6yAk|%ZN9IuL4pmC#`T5fV8B*sNj;<9?%?^JMUoj{R8vn9gOa} zBi3HRN7WvsyriP`;BQ}js%k^k2=cG~yxFzk;U{|v2*Oze@uqB0rPFIVnwmOD4Gho{ zULR_09U5$F8zlce)Qq=U21iwh)iJE5dUc6PZZI3(=Dp}mmApp9E#zxBnCVobR4n9F zHF8xA3sK#7X_;J&r#FlSSuL!$Xo7f827SU%-G3JB-@jnN{)6+5>~24i^g@DMM5pbV zGq6ra+#r1Lhiig`P4~4K%XB{7t&Cdc^b>?nt8P*1mkkfDfV3gCb|kuZG29y5p7=i0 zN*>i3bq<13ubOW+rd2wlhhNL7tK~{_wVGSRsj3w!m7-e35r;J#t0o5t6?agEyGqCo zvT9O){vJ`OWcpMfmR z7oeQ!6_7BHtoFZ1NC*AXfdNHrA`*s4Bn$>i8|Zc{|L95lI+IUT$*08^F$Q&eYwIM9PKH!~_r{US&QfpQ8S)7o z#Nkc1-?6cK_3G}PHEViCT0NFCe2~m{7~i&d=ranpcgVpxe*CUZ;dfu%mA$iT(ng@qkn}-Hl+6ISP znuZ5kh+o=Gk6-}>FGNBKe%F3TWn07ZYh+5QpOG_4w50FUm0f+p<$b5n+`dz?uD&Zy zc3sX4K27QYC?wk_rN*1w@wwkZrtmM&RpP_!xaBbtH5t?i4K zG`B7uXlofBZlP0p1zo@5MgXd3dD;vwOA*zg8Tot-E)@71um} zwHyYJDLO$?GK$B4d-1@uAM6+@a1^@iZP%}{*q>>ps3$0@iHg!Alc*xP1Cen`(a2G8 z>V2Z2fy#sfREDanqbdl$hbfybE2XGX*H-T}S~N$Ei*-$!}#xq5mA)Dx$`2uIH7wqpm-T3znHU8#TuRvQ5|9Or-y6m!&k)2k1^G)k4 zcBFTPi3K6w(Pvkd6=^FY*;lZv!puTl1CIa-v2_UDeHzQd`TGvcJ+OV+(Z#W?&3m_R zy(-hRZ0o)T2rC3S?kM9Ou0TV;*>HbUXD|3g(3x!zmArH^EC9g^`oa0XaV_+7;9BuP zT{Z3%-VGs!Ay9gtv?Xu1WbQG3Y!71VM5JgQY&~o>ikcC?gjW&Y_w3j%{J!-EX`wzn zV!3PO%DXJW@9lT2SaFyAQr_j_mrQ+VH^B(&=^;P#uv!sFrTrt7$A*?1JvKaajNA=d zR~;J~Iwt%ItrU)(u8kDd))q!;N4L~Or7ilok!8dC_YW`Gw|8kj;Wjs$yu{}{!zuLO z17MMV5=f;2gejODdE?lzq2Z%P;iOj&;&lY97_Njbr?1W;sx*fbCgYUoXaU3eE1J@Gx_nG5hyn^k&Ep zwN-km_qtiMy?PICC#mcoSxAwCLPNIET-y3JQ{rR-YeYzmyGuMwt)3#*6cI zL+t?0as)Ru2uDcfgJlF3mg349%mYc50+3D6$OnMU=iYc11yNb4lulYaK(-=;p(Lk9 zc;Xw5Od^a8?8m@sNQi#}4kgpVGhrKr!qPu8|LuQC>H8oY1@?(}Yj_(I5~hx@kHPoG zW4`*jcv%4yRoYYG$5WMLv)sGb8pNrMKR+rsI+zy*e7z&boB4i|tuH+`&#>wrH|9sB>oplQG+5a5xO)V^b;| zvr5_i%9^=db&=)_JFTK-F3h7bLnL0QB$dz)$-rCOPKVQC)$3&jyWQ!q+oIuG+Fss2 z)0f{6VU1ieK}8>{DK0N!jX#Ax$dJ+jE!TFQpUj?>4d#_Y^Q}4<`PKu=)ake-%(%ue z_({@l@aEnyJz+yrL>5qh=&lh>d-ziNWFQk> zXso~G7S(^y_OBLxg|_|&y_>t{(Jyb+J(W0er&Wm>;OED~#4vl>o$C=QGe^;yg4k$_Mvo zE!c7cV4@$G0I@L`>GfWZ-T9vppQYIt;VDnj=O?~1{*Cndstj*^9E_s zLzu=;7Qcc93mDS=ybM5xv4 z33iY>pF-lD0*1uNvS$xnNq7JrxRyL7-z|mFT>zk;V{Wj3O#Wj;SefSS_N7TsX#267vO~+tIuae_wfPr zA$nYB5}at5!I%37g&<`9J5~XAqo5(7;vaBrj{bxs4cTVlXRNjhso1KIAPb@-i0uN+Zq8&kbrQNsIaNNa9Z6Ze}Z! zhJ+AJo&hLB=haPg?qX~+TStmeCQ}eD%jD;m5zmhPf%G%!uQssy5R-k_7~8pk#DLSUqCZnxaf8C8@xa4`2y^RUs>U6 zIQAUz*T(9RuV%k~5#H_GJrpt6n{g;>Rc5oNNnp7J5QDo3eUA zwp?$u>PKsIHk%G+J?S)B$S%o1V=fB%p{hBU2doRIX*I;9sK1{iTix!urPG?`=s2z@ zY!5)Js;c0yGrw!I6FrAmx0GeP> z^J7YlO3A^7jD)7tFlP1_oP#OO^8m39gA@4#qz;RjhGW=;*}Kp|iorO>*@lq#0k0GA zOaLgcG4<iZGXOkpL(I4lY<@06{p59|oTQL`5)*r~NO$iozT(CIETTYbu;Wk3Y=b zT3!Q?^+Q$XKLn5P$(QIJgraHrBUe(#XAc_8k3Ii$|8M zLYm_Z+bSLNYZT^@Bh-~kvxnamw!S};TKm)wZu_Ox6*jsoW+=~Au)RgIFCx=+cOgyn zH8izat9XY}#Z{LE8Z=tH@S|VZ^cU-`KeCluIX$EjB8H`WyP$OrNid2Btni!dS5Rc| zPTmr5-dvZr8r;06AanF~aP-1|*kr;f;1W;)pb|$U;&C1pGyX8xx?#wJ5|e#k=e$%> zoXL$BJ+NTGApk(Agat6R7|EHITM~`^7&gJV67f7!5R%2l7!I3cIWiC#O-x3FssnsJ zuf%=On1Cn26wE*-5|iLMh4T;GHKM*0j9hnqajDj+F!`)yE((X_fg?ClqY&PZvD!(~ z&*we(;CT?ZKaS6d@Gcc)no_v2+EP%}kWhoMs%KajbdDZN)&|NV9lr&2TNE~lq8wDZZwckg*TPM1BoGt}n(vD|GEo>xC~>F&oz+P=7R z?p+rvn$s=bLhfa>p<%ucm+}|XBGa8V2Rz98tFF_#VbT>#r#IGXb&v1a z{rHnR8`j01)M@{v)|*|IoPKgA`cSwQEsG1sChdsN_e?I%BZ!BDSD1B-0*dLu01(}u zl0ugvF_MbURDVs_RK9xMk!OO`4rb$RMGv)@gnV#%GXX@^P9>gx}xJRXQSYz z;XU9(F{&tvb4Lt8KnkTY^h6`QwbD-zK3TiCGFsdbsyd}GnLN#*-eB#Fz~$}JhRo?u zym;y~yZ3UxJ6T_waQm*A ze#IBk12s%sipai;$L}DL*;kcczIldCrA!5UTJvR5A6*EhT3aI|rBZtMkAjTgX{kus zW`#C%jHh6SN$c~a6)OAei_7z?_7xtyY?0q(pH>h^R8bEdKmHI=5f2tjvpa$_H|cP|7sJ4I z^~){JY3@`c60YpA`X_}Z2O2A@ngNCFerta$KVpv>Ok5`5?R!F%Oc+b-73H@ij27Xw zCo&Cn~@{b`m@H;f(m^h42^PC>Oz6?_?&LF3L5?p9ML@| zR_+gJE9D^V^eTJ4p|YT~@rMsmRo(aR+MNdh(lZrn-H~8hd-#fyXeJZ2 zyEZw^?QOw`J8B!Kx?x?DyV+Cafc7_Q3M0M6zSPvt9jJeYoYfYspR?!AJGOVN=!AIp zF5$ktwMDm#nzY52M@#v9eYjBm)21eKJOn<(n*5`cB~UHr_Fm>RH^uyZPm#H!;b|@! z)#vl2@dIIBx=a%46v{oz z-(1`~sj6;K%9%0Fow9k2(_?e!>$WZZ7$tlA>7?76T7()Znx+ecjW77&>C0}2==8t5 z`MLM9tKxv6Bot-NNQwzy{-mZpmaJB)ZL2a+U8W4@<3E5pzCLj6x=qr9ZHr7?Te44dB{3^@7UGP*%YaEPlQKE|`FmO7J zO)|W=5f_aRfyV1cu(!!EWb(WLWC@Su7W1V$8g&c5A2?l8wd&UP+PyQpReOW&^Dgc* zyJ0?jW2Mf&;ECF@WtJ@kHPxK0cUy5wX(@C>wccRTl-Qy!`n`yI^etC(`5Oc2Do-RD z(`#jP$P=%Rc_RVOti@$Ir84fTh$9x9AB3{-DHL9Ix~jCish~o*zAUmTLAc!!5Uqba z-^sPX<2fPw@Hh~68s}Uv3_Qp<(PT34jVNTHMuh~?3!V;WX)5}B;n_r*#46EXkUVqn zjyL$RIGXo+z(sx!xWYt^A;gJM7+7T2b?a6%ph{Y1EOTaBTGP%(vy4umNz2zBH=zwQ zJ4a45G?D~6mt@g80&($^?~M-JN$Mv}<}P?>B%gHSTjL znIoc&i0KhSbzH9i&u7BLoK%xBMuu3$U=s;MO3pD4sS;9{3Koda^8w2;v4we9T#<{` z4Wtf(3j~scMb$M{*gH||VdH45aQFZ9d*l-z06Gtp9d^$;q3{_ZiKfz;y&-DxbF&li zRSnlpb{}diX$t%N`D|&ZG0B&xSX&`XsGPoN(!fu4uD%Bqw5P(0?Q7S5P`n~jxuIZi z(ZH-=tMP1l&w}2bQ)K3cTP~?se+?IGa9734I_qDARl2-_?|}R#%oo=5eu=sAz7g zM4e4GI{}683R!*)eM#}@-6S+dv@{CmcUQpt>*l?PtTmOAKRd!H97{vV%@xg8Y4!T1 zaLLyCS;skrZJ;>5J=1tKOl7Jpj%{w3e-!px7EElZXgaFZ>FcAVTN_7TC!+D$6clL` z3SY1zvS+}p=Y|gBUEpx8M#DbF`*PE7Gpz84DVnEj!t}mJ;ea9 z7k)O;_yE8)_~G$O3V)<=WCyDuFP6*PsA{C;Ks|)8`D9{@w@Dqj@UPm zSVu=p_S;y;7Klg~zKP&hdaXt=dM}*xPK6pyI8CV{Co43sfqPf=+OJu5Ei~ACmSv7G z><*S$C*vMx*tHCIWE@`4itxfY6jLF?9`yeI4PLq5p`Hz|)&NWiQFLMB6mMWI1#kw( zpz4!=9`-Vj`~r0gnUo>-6Fws47ZYo+;qbqvm|;U&3R!)1ppN+tS?7M^7YfR2HwSF02QRXTPdOomh`7Acj5cw22<$m$A}%0)01Ql(nR zX{!eCj?jQYHYWIF~qvjw}HiHDInH~&dSwGR7$i_lzC;k?Am>k8v1AvDk|z6BG?6@fQe?K?EU8<7FTt<=Eo85gQtVa)x6v!8lG%jK{oUy^1;6^k+lbAQ6}III#CK=K&#D{!HiR&Un0fGlUPC$H(*2_B^lwQ z$dCifg)Q=&T%(a6I)ssYsKIYMBv%nO9aklQiyP4XjlfvpZ8ED>%U{H~#n~;8y$UUq z7@e#VHy!=~6gMkKbT7D_XKLi9XBxdo%UMfihCu|uJ* zQY)4!)OXj`QG|-Unp3aS$l-I9b%$In)Pi8bGG{xdC2x^YjEdMtXs?0%I9fv-L<-qi z5}Mf&{;U;Wh&UENa^@OwH)xs7iY|oTb1%u>gbWj0X_9Muh%2B1(6LU2(M=}9H8e=+ zL|n|=;UV||KZkHbJA>GF2Ny`;xkqpe#yj_OlJE$B5RU%snDEG~r-h&GEh?^~wjE=LaG;$6ZApA7%$z+Z$P3|_VsEq|dObso#2O`}htst=C; zTKLTyx1vKZ(#2l=#ooe8S5y>IUn^;MaOS$1725UNxFY|#52=w|8#OmxH$pGjUU)ls z>QDC`c>1G9uDmDD3hoi<+43Q1%$vNaf61-+&vk7PnCc;7V=6@FsVG#~MxfoImKL!hWY$F3Z0GCKRKu8ZmEez%* z%$duO(ofK$3V#uY(nRXlne_87M@CEX~viQ&%t(nDdt6V6WNo)`ZD59LcGYYFMX<5wL& z4|(C4@p29x8VWX`8bf>rte=x68vBi^zmJc;S2I7;6`!?#+};m5hBJe-~j_$ zge4|LB+?@;qQ^l3xFKFd9LSXa!W%YbQTxZ{|{MHra%tgPlYQ++Af-YaaHr(!W*wx)wiIT56(x8SDizbe0{|s zwC?o4aaNW5O!b2Y`<1`^l$b1+N?F}GEgBu1cGJ>i%A&j203oEjEKF1}Lw*U=+CYO19ip9_tc+$zXaNl&5*g*BNE3M5;SMZ>3?EFu z5rl!HKOsm;2DeC2_#6ab7ALznNJ@oBEkS$a)T?lVW{!kPUZf=Oxn&NTCG_x=49P;v zLMuBRdNNW3vVmKhw7^AVCX#k=6amvHk(wi26bxx#;ZYEroT3~Q+!3Wilv_?|DMT^w z1aJuhhM0A+m4*gFlCxDAEKryt8 zU^Fy*N4Y3DKn&x7ARH{BVSmUp#0L<<8U<)n#)2pZWRc>VgFfRD;2|PG*cec_V0qi% zX#f(y9dHzdVBEV3EJh~*_$4tyIFw{bCq-FU*o!Bim7f#fqCk`$t}bGoAW;{_Jy3^? zvcPF6Ks?+4WQ!pZP!Dk*Hz&Iqd{-pEl*QOFLv0~^vI6ulc4P!;A}ANh5f>C^{i4Ro zC^bUKzpXh}9^c162cX%P`?hahQFyj@-Dc;38@66`0GfkQS6@VYaphi?-b%?1U56$g z-9`{g5zQVthBEYaZugO)9hYc#?WV|!DB`l?*|jXe0z4@2XhHKx8}63s28WM{pdE5r z329IF!t=8hf&vI3{P2Vp$PpL-hVViEA)q~mO2aPTBXUrb4IUIIhmwR^Qvo%@Y41id&I#ch z7)S`zVTb;07K)Sslru)`%li@+5yyA_a%pxX&Riotgbt-T#!a=wn?NHg%HKyk32 zjG?SJFh-gYoPz!%7#ScHNEtyvZLWi(;V`fWS%(bM3=~ul#8x1FyPVMj%m6^ZUp9a) zWrmvvl!#2bTn3eO6x<~;%fL28HdaQ!MP%?i7`MV_z(p+F2cco%Mv#$$fD26kbF%;$ zfGH1iah`xD0*M5}X$c)IcTx!H;OUhBF#>REhO&3!O^EARKsJg@CkGUeFbNQ;;U1lk z5^Tcc4o{P(9k=PiFs(EV1q|(A*R6n<^ae-)+zNF%dm!!OD(tcSmIe1cmMn>fFp==M4fHQ-lsxkO}jg?&(ST3y*8rNkbRmXs`S}k z5)obY*9rqspvu0*5&>2A-E}sX!9{3P+UQRM$V^^h&<8wv{(|iIUq&}1&MnQpVT!qk z7tzvlpKuUh*Q@E=)OI1?Sg&4;gD!#2mE*iBM$EZYzFYCH#|s=%)O6& z{{M)3&oDWvYi)FPRafY$PSvS8=P)_-bob;WP1cM?(kSN)NFao=03jieKtjkMg2)Du zqYWk>_zak2qHVA_VEZ@`WQ@T95R#^J-(5W;gnj(|x%YW459;ZzuG+i0_S!42cR31& z36vbY&s-j#kwL0=ii63kBDp&b=Y^{G@}gg2sa+0wHA|N}ru`2rp#|6UZfWe=bL5!` z71b@=1;peodxNw*2V3Y*e?PZIN;kn4syCMlE{3etZh!J_?TJ}$XxsOfS6@WBA6r0L zuRTgOX`>(kN(K0bo^VHh?#YM4dCxO-O~6fJe-sF0eYxPMg#7pV9Ft$;%ZIdGe)KP@ z4?N>$i~}_qVRPAn?er7f@OB?Hu9R-j|Sv$ zYs}N$hgJGZSf!PyT5m*tV=oGoMo_A>plBhfm7vAo`=&L3GSMD-b#kVOx^5(vD&3v|Mn=?3-lO(Lb9)o)eUrSh7(R>CMwV81qJ)iD=HpB&Dx0l#2hWAzn*^ ziP+U((9~wdvO$r=q9*bJnKaqSP<6^H+Y=A09E{OI%Rz+vuv^}3;2aG2w7^Pu>MBl1+DF`Yvs3aAf)!Ou%0AQG2*(c4piwpZ>}`(B$9Nbq8?(LeGt;(KF}V)dMT zL#k_OOz1YaFL+{AODODEK@80{QLtF!`QKa^sAg(2ye!uF*sY-N zo_5Bv!CQIQ_W0uEJ?ehgvUK1}a zBU9Eo%sKFRX%{^;Qqtbl(jH=i-SCfW~&6}H> z&Vq>`aSx@8tzqBAj=qW7C(m6NuX8dc-eUGhgR=e5r&)G^x#W#|f)=w)WUqKNzU%&= z70M@T9^SsDF}`>x(f!C$$A`+G_RQNuJj)E#5#hE{ms2*nFZrtX;wNGx*b@3^-Duu9 zr^IhlchwUq9Q8$fEMEl^PC_f$WH3Bwdt6lQ#p^F|OLisRt-XI1z`bnKxy{C^>Mpyf$=~q~3S`+eeX3!mWP$UnD&XqNnxvr9VlEO+@>Y1o z$c^MK@+f(Zynd>_q;Dj&3L10%6*9$7djCM?y8CL}1vo~$c@=J5{TL-o{SA+tTIvvl ze3&0fM(WO?{2a$=loVvH$A;*j*q37?VteCSq)(^oPys`a zQ`a)Sq~2q?SM(X_WcWXiBlI}fb{>GMx*LbiKye%JO7EB61l$mX^D?O%y{d56D)p48 zaFij|OMd{pCa!yi^vrj)4iW)}1PaLmI5IspEZ~P>BZCd#-smv+T==8k0lq5CtWQtJ zL%dlZ7eAK%a(H3%UyzTb;_#H{-)1PUq2)f9+nL9^LjDRKRvtB4X(2}QqA)Ubnc*w& zkAjx@JMHQ(W?wmUZd2FAUp+lkR^AD>$YC4?2kWR5a<5tOSJJ-!>4&s8 zKD*aV(-_g&{)#W(^fLk)Q7<_ zK%Yc&8_ojHIZCe25%@P4Hw3Np-YsgeQ8d~Btq+T6uhAg}8o%6SlB`0<(uulU zHBR``c}nwXGb`iAV$#7w%(jPi25#9uy197$bqsTs1Y<`xzYGFvvtkPBia>aq*ehL} z#UgSxK`mDWMJdh?%_|kmf`oKl)>&*v?l)sKs4{iq@PY|tL9mkqgEgWUWZ;(hjAlGU zi_ONvNH7XUH=Z9XY6E;3P{J832Yx!`GI0x?PGTL~a}qiQ8K{oVwu zn`9C&5sOJl#15|#?Wu*ug{u-8{S4~pkUcVMHUfg47bQ9c{6#X7Jq#9n2V;+kI=rIl zv~d=w00x7Z5|b#Q!98`ati|s}+Pi~6xrfPMVC-n!ON~O12zpsqIb)IKq&kp7l~*q^ zjb=doTLwVf4KVdCF4{M){JZ|6fl7{LS2f!Y7B$$nYO?J{ugbdcrMfw*S0FK8U5a-YVs8CLzE_VN4Zqt6$ zeTpLJ?A-C_7gQ*J^YzLsWb*0_+S5O4$e%Omtk&OZ2WG}MlZ9{0Ac0k%k@PL1@fv9Q zCQFk~dnb?$)#ryAv*zZMc9E|>I9uVgcf8HMHRP}_A2@ULRd1ymdnXjgJ{!#Y)Mo!P zunFQd{%3=Y=!xoo#u~F0dxS@AQKgegKy(?oWK04cH%O7xth44eT6eFnibZ#BLbi_K z_Wj!Z7u^`#ahH)9{T1?4M@I#z)Nz;Wu#&Ii?w~`^!m>=Bq1IfQXM&(!@+%{p2wH+A zUl|y!8eHOSM&z|x7W?f1(IsmQGH_}t0k_jQ^;$!kmFY<+8DR-DBkfd zpz{2dm`;^QFx5_qqwmI|YTwoARw*lS8!uvK^@dV&W{=XE(|7622EKtL{%H3!h2nCrDN)}6JH@b02eh?7xg#+ zZmQs7*T(?+rU*|$TE9&Q72I@uY*1))fV8A6T}Nlp^NI8r&vzXL7af0m;6KIEj>2sL zAd^NxtY%GTnN$I-MMciTsit{Wagxbo1klM`R}giCm$C3Qvhdlk#3dME%EC7Z?j{+eERBH}}duE`eGKu2c+K&Q*{@9P5*GpDfrM6Qv$wxT=I0 z1VWJk>Cf7W?wxHaA7OhMS1{CxiY6F2#LCOdn=~|Ay zn=m9`0CG7!%MJR;*c4fcYg?EMn+EF9_%*aJGS9*sVxh}XzNvqvP?>3pyGMDmO8>{c z;Nxr|hgVmG3Myn8wm2P`*1V7Z1bVtVNl&r@NSWRYy-5tj;{!ew#w-koLFqQ5%A1}t zS-36Y)>(LR?CI#L-S`dTg0W~Tz4X+1$6%eSn1U*(vw6q~z3aMu19Wc}Zd+|*sTzm_ z7(>kgac=D6*!NlLHo~&dSg?ve#+M-cqp;z=hlu$ProN=TbYVpWBIfeop2rSfz5C;p zkG*_#cU^JgDe>|>f6(qYefy^SzqoM$X*);^WYg$h|L58_v=83;{?AGJ_*6rQ5hz?W z^C}$H0CcE?k!OX>nyTjw6#|m_$`os$)F^(h_n?6nYuKsjG~z^5BdI3dYTx{@&4&1l zWCKn@e5e!-U4tQm7)Om^aj|4Nu+mUw6p7IE%&{gTIDyY-CCniK8(Mbzm}WF@+-LU4 z^(M0?Z!-JxW>qGV>@=6F0j9ZRKm9+6w3KfVv|P0F;!G+RIb>9K!H1A3bVZ6 z#n;az#(e&N#@GY|3Qc zi41?Di42yjZa!w1HPlghKiTlyNB_0|!O!m;{aWPW1|hw$*k(e+fkEt#6)Tq4P8Ea* zo1Il+l>>qlx~zk(VO0QQ?Cb168+zj6L^M=RW&HuLUnrCuXRO~l5jX1)q~aN*D0W~z z&&OCafwG{C+(0BSS&_JLGEYxlSy8FoheKqa0j@iS?B>j(_Kw+aL952m8oN>Kj&O*X zP@S6RxD_$>Es2fur$Mo1jDjWRbRYYp$VUldb9czl97T)W|6XEc1k@r=d@!Awx5jN4>rvxb}@ z;Y^7kE+eExoP~g9X+6Stp(9rc5V@eg?P!kn#cCgJnWoBa^ zYVnD3Gnkk#Q1v&|SfoSi!|UZR^hlw6;oGsY)IJ_(gNQoNcsnh~LyFEAPfEM#MVL=7 zi=cXlZba(*ir!%!Q_unQ`RS?p?Bh3$vv~pF_4KF?U`J|kwr=v`lynwMP@uO+eXBIf z6=#D{(n$)B28^3(;^WVW-ac-a{_spKO__y)hU?l8X%qBR0jE9_9T1NOW21hIr%#z;8N*8A9G<)YLh$hbc}nW&Sf{xRc(vkn-W(%<)IV{j#1z?I2MF3yzp zvksdelje5O>XD15We&GE(@r^1O~@T*^lpsWC0Rm1>&Cmz*51vWg={_1TJFg%^!m8) z`Q|`MQjB~_jXxCjrcySmD5>i`CCloEb}Vl3WQ=C1`SjLgqc5xGBet+rV4F+Kw&LDM zB%N5&dSnC4FlWMH^xIjVEV(QZH!4WYrX3s#>&1xK>_W43Nkrg%CT!Y!Z4)fABVY_9 z-A2hxd=95rYv8PgV!D02OiaQSV2Egu+-U12S-Zs08rSYpS0FJ_ahfJJALZ>vh;YDJ zDFB^-pqX%@$o>ynem6V|#jHcYfl2XpK!AGvQrzyeD~aOWcQ$wDVz#(i;r6V&e`0ak zz?vPf31#b~GFJsM5E6v-xOUo;R*s7>F0muxkR-H%aTu8>ub6x$BaBQlA9UKo5w@qS zwmEdJJz%xiyR3ootygz7MeG(?L>D8QP?K17&(yPjJ3F~G*)|_hcYEtJ(%fiGRdx{2rNHbEX~S=TVBqGt2%?^qKs_zlGX9>H z<x2Rdn|V)gT*GB*;eU8ABs4pU7zllwDfe+`EpQl zBER}s?Y?t|o;iceV(uopeSZ0Zb7Y^g>ipV^{%7|mXZu`O9kay6{1z3pYiwn)J*=pb zS{;@w@KV4hg&k1htDO-HJIkG-K`1GXr3K`va6)l9f&w%)9#Y|1QA~c98|lzjoTw46 z6(*nqQKYB@>4X$rUJo)O%^6%yuMnW%+VB7XRa@tKK!v5auV8hq=otD4!V3kS=u!(h zEj2MIAEW#hyc+sFn{}MLh^}Da;k(4?zT(k+*_3v5s%~^&iH9xnP4;O!Cg*ZP$Srz) zD3?RqUQ$|R;n`{jbNSMx`-uy`M$NrswJH9IrTO(cV?*n=l@wn$l&6V+; zeGaGCCq<6lO!gu556jCfiA}OG5xf1^^|_&;+>=l0ugq7aX31M^wON;5zHgb$o{Oqz z=>2S+EScS*c6Y~xO(t>ljqGGxzB`&qv9rdMd=r zskPum^jU+bD_P8ism7<&XND6Q3xG+-Hbp_$(KVrd;~d>zc!$6AoWf!Hfqso4b>W4N z`X}Qs8JyQ{VGdnjG5aH-%38%$xut(WFWU6@t*)*a;`u8bow?Sjr6IAmb>DR8Hj_IC zx?FYJ`f4V`Hug{U`-1bsfemM@dofdKS5_bgwwe3J8}_+Sg6@nz*I!-S(ppv4!#&p9 z#QE#%rzNcci+#~mliO!R71IU7)3-a!VYr764_ufTX(LRx6^HtL+#D`rm630PX|BQ8yg{ z`Y~1#0#AOv4n+mXRnKI`mJ^(Cj2hs%r*VWh)g5SxtR7W>FTJqx#%>Cy^!1JORV+DT z7yf!|RvWqD?2BKCZ-S6thEB(1nIvtBzjE=~E0{j$#O^uuHIsc8$nw4PyL&lazQ8wB zTX|YB`~{n1-eWWAP=UzWsUC(f^eiN+6dr-TCGbOanegp{0D+JF zd?>S2lcSG7cT1_Nv4<9 zPk1e$UF#kdj7fFYyq=Vc2c-*DbfHr)3z!l0U1qy6KXF<5e z%IJbQV*sIIIyxg=0q{3n8lZi3HaP~`giGRd7T6eYa?%q);W|Ik?}Oe8plYb+F_p$~ z+7tt0sC+%@KUwpFH?v+}gf&Eb`5Q_jY+1zVVOJ+7X)~Ykab&`T>Q*;mB1XB?nOZu0 z+m6AircK&%Pf2R=P(>$PUQB52^!YEYKJ$0yFu_26?xV96HD+1xgL4^TbqQuC*+QP4 zR(7`b*FDdZk{wM)q~EWJtJP|K*x{KLLGxO!u5M*_WA40YfXp+?ETEw;U+PAuGS|R% z$!Bt5PedVktDGjGCuU73MhV>l`v#IJqf2sHNtiY>l{{sxlHvu9xGlXlC09NHXM{JyRGbM}jE!>vQ1WEb+PgQN_Ubf!JZ< zvR2XFF-3F&M4#-e@yJu4Uviw8N6K7Ook$puBFYcAOi14}g>2DClgTZ+W=>%(T?{9o z>fdOhK_Z}j<+V@HrzZ!%z=gnVg%PS#FrJ2UjDr7#fD;L9cDmokX*N#`{^XZ6Fn$3R~rs`KDY!5ONm zP3}8h}s7hC^vL4upGLk(D&(@;Rqnx z@EK?cSXatdAl4|FuWmC#r^3iOk^&qBaO?usxp0#hDVLG90K4Bex9p-nK0iTB`DDMv z>FQ9+Wu#UBvM?R6$nHomMH1C=$M*&-A&;TV9WvVmljJf-P#$FvU~jsWGKVNwgZ4|c zci(${#kxg5>dv*1IJstht~CfR_@1M$kg3lxPG{OCGLpmZ%wK!F_rt@t-1gBQ?_`fK zt|~thyEz8B?cG~_4ok2{fY!$ zdWnP=&I!d(2Oj)-qJ$@w2g7MK+~J>HxgLF@6zkDG_s}UM_3Y3MhLd#e3rHj1{}Phsvw6 znWd!tXnl?O`*Tqk`UR5KUbEE}4O^4Zi1WIfw*TYXZ~5fo+iv;`Tu9+6duZYM zQoo&9oWF~dJoJe6{*`v0ZR`27?-7MRXn&Z%i}|w2-J6$9MR&fi#pH_<>7JKKQ2X6a z&U~dOFi=)cVwZ-=&F1)%x0quuP#l<9TAT4L;ZbM>J|LY_gzrAsD)XTxEl1pRP0@w_ zRv}n~jO?5pWKb06^hknBNh{I(xK)CXnyvzjSuG4Im0*Taj6;BX=~_BU_*9@Nq8!(i zN#ivg&jQ3A9b~YF<5@wsGUIroqU8xyv0TQtwC6@&*B;lN8hu-PnshPM$YAMX>PXws z`?b&4bLpw`P86YmlD5P)_&*D;BhtX=9giYmiMzpKxz8rc6f4d)LZX|G{%qcg6X>=? zzTflp@)ckAOk%DmZL2jqyShvj6_1ycOf3mCKJIY49VWNi6buH?oat@t(Xk1;F>ROj zD31J?ePZ_T(Kj{_*YzjR{i*0f_+@hE9RGBXS@OJyD4NBRu-Qzq%_e`deCZsnaf|kw z+cmBG1aUB#kw|1D!%mwtx-w|;Im=}y?yj`bj1Yy%rkEs~O~T#64%&I*TH~GkU5H~3 zV=dM~U!#4iR6H_hV2&{s%fpJ)%^(=!@N1!r84VHB`VkbbUgK0lvi< zkLpGw3d<4Lfr`MplK3|c5=<>r!{2Oj?SFq-KEUUYpy4)6FF&5FnkswX&1ZA`K>44M zRs?_{vnU^*!an3keyB#>ilO`HW~b#Xi;IaV<|7s-`OIng+M>kKJMJxuQ@aiLi+F<< z?x?m`ZFSCG*vx{G5s&X4MAK%VAHxb%^6i3ymN1q~qbMaL1_`uQd{AlTY;W<)jZX z{O_N3?K}D>e~s7)FZC3z+)w)M_?a^# zjyjl+Q@*qO$+Yil`f{c}#e6)fx|Me=j!M>RyAcpQj^bdTxHu3j_Psy1TwCh49Z-C1 zox}2u%}q9|Hl&g2FJz-zdD-T6+g?`O4``p)lZ&~%ifi9kR1l`Gqf$@V;w9 z3w1kr%a>xxT}p=;pmKgIMV7My?9FHHIQA38tyoB@;#O_!W%|6uK`lqzqb3{fOZye@ z_!JVOlWM<3F6?*SGj}`<>*UU&r;Sf=Ujse=fubj{Oi(w#L8y4r{Z$ZZx_PGC+$xq& zUprkX#$wSYjZ;%;bkvE*Y12NMgoS@JG)d$97{m<;rbp5X$(99I0xqLdB=yP0Wb4?D z(V195?V&;yK#x>DvUFl${4j`vo{a!uZ9W56Gw$OIpDWT`y4^TbORwvTXOBg}IfffH%-)DweHYs~{u-C4r2VNKL7RUTzIq)ynKSDdys; zN^*YCTqH9$ow!ig&wqf%0zE~u(7tmca&`7YZ@LrEEdN6}AJ&^HNby1wyGj;e5nVma z>#w=-KXjRaP>zmDW9L)9SfRTTQgpC56wM5F79|wWmVRK&ZmPwYRO!-no*d4=w|EbZ z>(12Dj9cnZLY5Dv0E@-@BG`2&uSjB-gHPyK(bNK__o|!1^v$>Lbm@nSiQe#u3P|oa zVVbAUXwtOz4z@3CSTNIrp1V~JAfC9r{f(Ie(>v!^PMZ}%DuviQeR^}H-=S%Qarj4n zQv23&$+&pqbuJTfx=b4$?U&R==gh2xN-XyT`=-Sr>FF)`X~Sney;NGbd1}3C6}93( zIw+dCxLvWvm7rp@*%PFt-yE4d)fX6(M79ZsPvZFe~=u z=54A9N*nt-m%)x_-lX&U9R@+WczQF?-Q}DAn6}if>}u^kHz*Bw)`+U!NP>F)KW(=I2JKp2jz6d!X?nB6x>eCmsb%2+=vo$PCZS zG$qnj9kmWf6oRek^4bG1sxAKu47d||>i7<~naE$c08drKlv-+nQS}hkZ zS$2^Hg$%Q%+8VpAU%K>QYj#QLmY&`%W!2)UzP>X=ayMY;DyNiXSD5W~e{*DFWZ;Y& znwLgPf~D!MzT(TPOB?c~{?L}7+SijV4V8pfG~95;K%_0)8V;8v8D34#7-(p(oL}J{ zI2&tCOjAcPW|KCteQ2PAJd4E7+zd~Htv*sUX;M`% zulyz$jRv)QOETFKKv~Sz`k}Bs2Zc3^Y?&o6u&yn-x#*6f`~OW*rS%%RrpjQaVzbi} z462LTinzi9&F8QJ`Vx(!Fj1oxV3&L$>8h~S0CFFbMSAd7mnU@Z>I(0_Q30W$Q>tX> z^n&USEOlN@7ydXXAXqvNeO6Z^wIBbhdU^D|?SERtEt#?K%eR6*-9GCkW(3)JM{k+F z0BUBxv#q*#QlP}?@=wVO%^02g_?}%)*P#RJ6&KA7ske!2h1cu^j7X8Ye0l@3z9qY0 z3hJjU4yV~NZEm(DmA17ay!bmMojiU7YqRh`aq6ws=9s6q^QzXysM_5*k@(^b7k{%B7l|8%4F(T&>Oqo23B z{E8iEQf^OAtHK|6_@Rr2)=##Y#HG*vZGNWLacw2Ae%Hz_-$)hty*cCdPT1J{&C?~5 z20Wp2!-2|%#){elC6oF}%EA@F745g5F-@&dwdR&3fyxkjsUXgsRMIIQuZ=kK-TsO- zVP|flzns|H>wS^FrXo}uo){6+LKAG5I#@8<(Np86kQ@ayVSI<_vL607=XW-lzUNNa zW+`k`>Sd#cKsZzb(H9;IPFMBhy3-H%mYP7gVw|FKg#)ybtiX_Hn8-2{q^r(2<7#OF!}P=VldiaP&DulB2usQ< zcMc6*R9VAso-({%`rbNZ*9yU|=8kD1Z!m4j8$Reht-CncSiSqSr5DDl=P=&M0~M3} z>yGp`I?w@q?eIF}wu!u4tkjkcPwD<~eqGZHiO~}EzS?lpFZr4D+nIg zqQy(wb1Q9;%%aA+KJCh;^4e)#HQ5ei@=UJc9inzx`^@Pb5fdDOl(gKj<#+LZIGGc6j3)JwxEU!+Qf-p{+dZI_(iQ0hg(qfD}S$Yg|kqN15u%^bjSgy!|9S)4qW^ot+x?GQO$l+xt7lbz$4vzxhpd zW@>-VeE;@2dy5B0UME{d2Ora3TgS($Gq+yOEu0{>R#j*DRg*iF*+h((+k% zCL@fMZLe}z((~6%+C+&`5M$n&vdL4?g|VkJnJo{E9xl*&t$5A|XkGByiEnVAqB~Mv za>X+E5-HuMyK5L;G;GQ<_cxZ-40l%6b&#KcfsH#3PGM=I`wYHC@2tC0JF-@LWaRXG zt@$4QdH0<=kA1ZG7wcXnt1ePqT?-9Sr_w&RM|)+E_1KXkZwlJ$CrpmYNTRSuhI%G+ zxMbN}THG=`+>tKn7^*v7g`I%VlXiO7w5b!xbtvz~PJlB&mYp@;!d9@GgUlAgTF~}j zZem8K?Ryr^Kd^LLNMpi@L|A*fGT%_i98sH#dz$>QNH~4#BQ2w*vZyS((q&$kDO4PY z*=+Fyhy52#q^cvZd1q-Ypz%j^$yfpB&Bc(5_ZL0&ztT6@5qJyQm{Z~|$hUN=h`Irx z*yal)2h}w;hENX@f=txKASJj$RzNzF=IK$?zNPc1W(K*jG-J6y`gs-B*D1OT3oDEo zVl?`Sb1)1>$MtkyL6WCRrk$Yk>W7uvLZ^ZR&}lq^}lY&&`I=<8(t(SPzl zO^t;-+TXMVpT4H;VWwR;u{+`nR!mHz3&gszMcG4#D~e}^AkG$wgXt(BoZ+i~_&<=_ZbPK!;FR39uJx}rzVVN{xZTHo z*?i;NwzF$LC*S+vZtW|CU=6k5;w*98;PD+k)OB8_DYYQG`Cgml*Cw08PZnNbjXRoh zB`dGZ?OP-SeWUNW+=mVyK6KGLiMix~4I3Z0e^X*&h`jEf+-`E1fe@5P!E+N2b!o2B zd9}H@=b8d;pXF_vov1EvR73>qvlAv`gHQ$?>c*mnie5(lu)pX%NWo6xNca0r_XDb) z>MW(e&hns#JdA3}5A}$bzVL-Js9>YCK}kTM)C5MF%Y*-nEv-%i(3Ax?1Sh2}DkDJ^ zxIiaN$V;4$s4FI+b5ItT#DEz+Z$;4kUniR(6fV_;E1efw9k@2n+hNba-TK3rT@t%OXfE5@X~rODa~m-SU&mHRTuRAX?4&%3J3 zQ{B`xLL}pyPH$7*(in6aC4_X`h}yUk+uGRRmh-*58ocVzp_iPB8%<(AIU;ZYlfS3R zZ8o0OmI6~^Ik_jd&=T2Hd3xi$M@Bam=cKR51F!C>n38x-^xCx_nqIhQ&#OmguG;nW zeGh!~_5I!|6&7=-${~7gSvno9t->L{*Y0nvPo(zz^rw6Fym|*pc(ixj=?MGv?{AMM zWKYB&V9;eQ#>sW@>V9`{>%`prl8`0sbb5dQ(8C8t_DxQ8HOh{lePW;01n0Yfm{Afn z_rd|*x@vZ+def5|GiJ1SHjwDth7YzmQx21%%2Nsyo2&2dpS>_8$ws?0X-CbDvh?@; z!7m>;l3LU-^0LdR9hWUm&(23)-bc39zJA{QUmv;e{;zl>=(=pC_-yT0ca)VzTb6W} zdYsaZ9~)oZx9??~8ENaZC^mAv$WmH-Tf5LWQ`n0p9FGHY>&>DMNxN>931AhJS){?V z;xr5Uq+&)rJ0Lj9k|<#!Frhlqlnm*Mu2A87N_H`U5*`Sc5)#O54Ay*{>~Mh|Ri$6) zL?=)&iIY`9Cq6ygLFW+!>NX&T4*=fDxk(+-C8RpdWP|_Ke-Jee~xKNnyA4w&m*OqC*h9!iGbS z?ie@_DmA_^dNzM|4{FJ5q}}iHwXs_Bdaiit{6 zL2pEd(V$FM&2{bKMCCLCvKi$NZSp6lnz~f0>1PKJqGuH?a_%*m3eQ2-<*o5lJC#|N zgwvhcA)q88;Ta`l$XY=%Hfj(B#U!)Es4VkJt&gsHoFE~(7DyU)LsNT8sqBg!LL^q0u(nH-KWM$YY*>>?rA z%7(#6xF?gI5)Zvv>T-LKSMMhCwq$>|d+4%8sTzPZK-v)GP*6c-?<{Q|Z0VmvW2i=; zVU^ycL${+(_Nt<}gStAb(`KF$mHq;HrtH6$J1F{*bXMZutN$2{ut__%$Xi({kkbM%-@L4SSx zTeEd^zq{|X`Fqns6=f?JG%z%A-fK4b=e}-r3%dXnimTYpf?B(aAOrpupkxIdZn3qSvkYCV9PZ71z`9XPU4L5H0y z%gZu~!hyKCLC*=LcW%MkVa{aKr?I(V$oVt=tZPsoVV zkq-1pqStJZ1SZy<*!%K(yM}IV59GFLf4$0LU6|AA8n2q(y1OoAt5a;!>vNRzmraYu zyCb1Zb@b#(0gsLfP{iX*D)k(iEnZ(@|5waK_v5)xoi~a_~c`Rc@XQkAm ziPpyB@!_=Y6{W0FUqzh*LS#}FTaezAlTo&)^Qtz6a%P=TgL#3S;V>lJP)1?$miGag65teBlCSFCz##0Ik-1K zL&}%58_h_Tm{aAoJF2~Am5ey5e2SyecNLDN;O1Coq>rh;AZ)=No@Im&K7gsy($lsB9xE$@)M@am2J;+K-@&_#E7i^Ij#KbGaV(D51Ph3Ffpp z_!G^z5VOUbaMtboB`S;PSy?TY_u7Lw)j_=aXSV~8GPj)QG+xTz2neCmiY`ZY-d}?F zEWjnBvTuyr==>xd*W*70Hb*tLN<)Dq!K9}3m9$!hUZJx)YSdEe8fWXwPv@ZqE<*<$ zYXkskSm3CXQt_@ZFy)*DHb~!P^e&ABR`~zw9OI>(({nq{5&57$ThUwBbA5RA&z7Ej zhOzhiGs4&RW_!z0H39jooeeYESFg@2$ySkuGaevA4?NU6u(f{mSxbf!o(G$HANry8 z$i1g)FIVPPRCi{lbgK3w7NkPTuF8E%y58(xP=53i!D{iWNO&zse&KxrqK4+nJK8;_ zh>5H-HPbKbAu|t*L`jtB7q*b}uV3)y{oTzTV_asIP7eP4V8-ikqcun4+BUD1hkLu@ z?52`vO(;=w@Sl;%{ry8=pu{kB0Cj^X(&c=Se4=|+?-jQqd4FIxNp(1C*nb3VFp(M~ z^}CxZrX?H>zs+HEqRb#=>Aa|~c}_*4I9f8-i`#0d$kvkfpE0Qucxv6aq%H$O{J|NEwGZ$2SOgjAPDZQPb=$PEIjO_srud|`cRQV7d#ZR|^{VM9Z>q8^ zX|*HqkF2&?gD5g}S-*a1d2H3TZIOC!+yt{yh$*7k3A1`+%X{k|{%XdA-nL%EH~3#@ zZa`w%!{L$DE$1feXs{#~yIiGxo^n;R@bGq981~tUc`CS8bufBvN@N$$UVmxH%$o8U zfUIW}(U&1EnJ+YItNyTe$AZ>7}#F)mp#Dzv0puK4fFg*51=Dark``ws)tB zH--`enU2%3`XTKX63-tiYDd|}-l88+`GuvW+kc!#j43htfx-p{)4&4iz6o8p(ZvVj zp|NP)M+W9Y2fzsWvVsh7w$4HF4ysI35-EI1GxeZ(>nXj|qZ}`=6R6@vIifQ$43xmR z1?(F8Nqq=m3fv|@Ob=$1`gz*%Ig@XmG09ouYWV5*2hQ$j?d_iP84R1mg%%zt<}Pbp zWBIaYuROn25^T(p%#;+OPX|_QSbI9GDubdZ#!6M~zO1Xc>F8CruUMR&8ZWhryd~j5 z?WiQIb-nBB+TFEaBRTh+;}-@jo3;PC`2+2f>FoSz_v|=3+8XhCl8i}m_igMacg4)b zoy+IU8Jg}qZQtZXdd2?LKA)#aDW6+9ZD&vONZjQ&J8WgsD`uUaN)HBFpSf@Qw93hi zqkrbj#&T61C<&NLP&Oq;eFk{)0cyf-I#YY~(7tCY8ynj)Zpjhy?zmCi=hB)_e%_>3b(u84n6J zgeM(EQIzr5AeW&Js?6e|GXXnzF~H|;D!QkrC`s2}?_ohR6!c~XkW8ot(pwzRV?s6x z<>n-rxZM9gy=P_)%)RKMxdYsb`XBAbHywN9rutH*uB7or?R&>xEf3QsK;iQD=nMVu z-=`djl$AxoW$)3KvTy`%{+DwKSJAKNIHz7dcGWc(&1DbHyYRw!$4{Gk5jXwlecbft zOBk)c)75+$cd`0YHZ)3qNzpmkU&;M()o;<2dtlN zR}^+z`qb9wbYtmlC&t$JU)#Wt?=CbFA312V8HFzP__c9`-9I>3m#M4Eyx_TH>dJ!$ zS5CdeLzJ>$sH`j$DkTf1ZQe9;^w9K8n@6}0`s&T}?CD^HJS)>xt9F z-TW|UKZJNjV4w*QRQ6D<2f+qtM3iaKngc4+sKP`2y5sy# zKY_|Oh%~%rDc?$^2=xjShM@9rO!L=GMmX|lydJU)$7wH7YD!WmhIWAkp$RUh%R!7t zhXm`SW8*gHo#{7Gkc?v=^m)c4qb_%8s3H|FINs=$Zs>I<&jnr!T-NZ`jitEOChR); z29>lZ9@6J>iMv13R(_`cBiDWQXL93b8WM)h09r#Tm(isC(RHL=G60asN;c_wQWEFm>)aV$$G+e?8|AXhc# zrY`CPd{vicxU0OovY3lRTJkOw(sosqlrC$Fpq+0Zxh(FASUo1IyTWZ1jc$KPv7?B> z>f}5Yvky>NPPfAuwb_}N+EH0517=IK+MHJOju(u!2!QsivQwe>B}S3C>bAGZiZ@>& zi+-hDRT5NW*s#3Uii!;KIsMW2!Jm=qKl@xe?KARzz1w6l+6+O-6aro`Yw?DG&JdIS zf!tCfS~>xd3bLouIL}Dz^^9#a$uyqRNu}>78sv($FoS`b+q! zpn70_k|MNmyPe*(@;yR61rQB-76lXcl{U|le*>@zu%ZrMYt z33^!7VGM_S7E562#C}Vtd}4k^#~D7my&TwZ4p!nWjZ6#0O@d%EGaMW3jiy`{)Rr4) zQ6cI)Qr^XkWScVY4YKm*0N{CM^cmG2PP*+unrdLSc#ud}<#pSDJ;pk&{VTcdFMlNm zzxYC1@t417%m2c!XVEq(di+&eYc$%SUij;2l1&Nt;yFM|sMu08Y|H~kg-J;Q@+~-< z8TKzz2K-EyY!030@H+j#!;mFltyzprYA9i|tKQ*~w87|g1qq;i%^1B*c;=4CfIszH}9sxkM3uE|3%Pd z$&HOvORJ`!uA^X??qvZHQ!1u#2%BmAo5EJ5pKuPO4X2B;u~}%~l%AZ)kgL$9CgD@m z+?=Lgf4HWtP4;kg#msqA+uhAQi!Ux&_qVOur&>V!c;h$Z{v)UThOm`&$>G6Uf=wm| zC_zFxu*-bbhQVAzbm}iR16ok9xdi`LzgS0h%vuGMKGmYQTXxL#y5VxN8U`8P*+y=p zwf2H(xBRzyc5nW)tX%9*EGi8H(y^<#d2O%S+<@LW9}~xy+O)4Tmo+Ok6A%VlZU^iN ztV^rlv)TOiXkyBYSfETWNc;Y9e(AJQyRF2cST6fYd+n~_$sJ~^Lsk6S28k14RcIb9 zC!*q~{97QuFf@<$qDizmLS&-!anf)F{cE6)HGv|PvC1Yrzwe~t1g(s2e2z*hoYAJH zWHGwJTgex6qEn|ROiG9Th#dIj6LR3A!`hmUwObsms>6i-Es=TCib-F8O*P22rvEGY zLO*EJbA8$3?H#Tt>dO_$I;%d|9ZCU`AyQc@%g7)Z(MpW(3f~7}_CJB#;Up+{AydeF zD8Hl-@n;0mT@63-`}EMzYw(ZUoo$g%Vlz7*VbO(RqqkFgkl4oA^p=%}pT-J;(PCN8{Oytt%@&)B@oB$B;hTq%;nSGqsM(V?EMM5t4RMu^=o1c72c3Pa=RDv>Gr!zuodBtP6oMbEx z^I;-8Y)0+Z&x&4)_VNra2slpdw_fDF+T;%}k-eT}=bgu)^hkQ!!?-O1D0i56tR(ON z`kDZL>mLsDJK84}yJ3fRm^IiuB=CjNWO1y0M{{xjx<4lu9))BKXF2qrRPb&;$}}LHuOSN**|b5-t06iKOm`!`I%bAZIuKK)7pUf z=f9S>nTY*QRzdue_E)o&zgGLuVD+Fp7Y-B`Q~QirkrnR1aujozr9ZHYOA%S^i__sK z(>{DoHnaaE)8C|zkN$g;On>1)?bU}LBH2fNM6wS(tiAe>_A|v`R9sMDIE6=|n&eR@ zL@slcmAOQTw}|EvskyDZqNSyxysfpOvbnjkq7`Ak+fF@q_To=qA2qg%nRsp+%d-3&iffzdffnw{5gFje4b75co>9vE}#}yS@_W{fd0Q!<_ z$@eZhch60ObC+_z^Ds{=i(G&z+7qu|ovBb2i>;PCvA|QMR(dl2Oy`|X=|i z6cr}L9gFw+)J;pYeVNm9Yo4ywy8H>o;l6~dBgsr=B;76gS#uasCIoVS=EYy5;5|Pv z59pda$M%ax0X3o{n#ig)GCh9v{EFG7BsDz4;XE|@s|{|TYf6IaXKpNzarD|BXkV>r zpU_Y{9GTvnn}aIvWYF8-4c547hJqy>)_aTsDyxTYX_)HV$nlZ7cxpn!MdV77=xk^j zs>qX>6_*aHoShR z8fc6p6X(>CHtP{ofe?673}S{%@_f z)_b-94hCn=9Gux_?|pXpwxU}gU~SPv4-2WA-^YM%1orfk%qgre&*T%59HM%%Q)@*b{h7CGH-*{Jcq7d=z{^j}4$X`6c zd+9&5HAEBLsQ1Ct?2_NmKZioLSJr%rT8_=mebg6F8MO?rP!<%#t5#iS(E1dos`8BR zq5K74T9ro?C!9gy_GG@vBO|L2EE3P5lH8|!ji+Rnlr0(I2RJ(OZkKQ+KH<@nO_=1p z`ee#XiYR&H+OQ&05F!H;S9#52<4bO&NUEwJ!8p$Fml&ZFMW&G>GE#-FltXfLI-Ej3 zSA;kFg-g)T^`DbQNwcVPe+4#rK|6v|-2@0%!` zCNpqGtv!20dLdsa4G^(gC2uk~ERi$6feag`6f-cHAgqFg=vxZ2gt371b#@lZ7GY)o zt6bYu&_9>Ys0^tj+hd?QqKTzF{@!57jUbxtgtyM#(Qjs`@svN8)v}zo-GA{Djm@5^ ze5^yK6_Ea@0!`EXLvH8=YJ?Q7IYIefv_)+?)Uq+&usG1@&1@|8?FQ>(hc z?)nC!A6w2b$X6jQTNuNcuZ^;-N!Yoj245$W2-S*nt_{!b7rodXQ(UNK~h5|sl z)-pGZvK~vS>2S$c5YsH{^G)Zw=6IW>KUCPLGNdK5*Rp-EYpRVJ_NS7x0NbT{#D?a?L0ZOww47;uo=(YyNV{2%#uqF+wPrI|V7MrdZ&NuKz8|~UVIQP{P z@4m?R3BRjPQ>}uv!I#PiMtz^w$)f@e8yWd-!tbw31Oi$5&u<(Xn4m&|#V31<&M>&G znU#d8;?k9OTzutI^!=+PW6<2S-W9yF*%-*Qt>KJXK}?1lLoGaY``T`Av%NT_XLjnB zCfn-bCZ9PUpGYC$9Pv47L+rw#jwZ(50DRw%Uio>d0U?Vv*513v%7sJq8QwnFP0IZ} zvdP{UbYHZ{mF3y(gVZfc1~WD@XA)Dc_T%AkhmPTqHBYnhJ9p3Qd2mMlN{=_ivCg0r z8**s4xJ}Oix;Jj_J;H3NHL*@@PpTv&#%lGfi&WO8I#)*Zt5@&1yn*o!dxcWEcG97D z2RX@Q>t07OB4gRw5%e<2HXF-`0P4}JfHvIUF5F4!_->9l0oIDO!?{e?m*P}dxbrJitg zn+*O&3#7l@YGg#4Wb%4W6^e;)zB$_RyH(97V|ik-FzAwuf-x>NP8}WEbK`wC*HATg z%fCSo%O7gK3ylc@lnRp}Yr4Fmu=ucOD=uT4E%ThS<~aram8{nHe^*_Lit8F*mAegx zV77{*HL+g((>|*0$eRCGeJuRW`cKD85M+y*h%bNpY|^~4^ukqFztBEoPG0@&Kq1-9 zztsd!;_$>r1?17u`F2bNs_)Tgdo za%hx{to~R3k<@5G8s7k7=sDU~DFgO<8elpg3D5-ZMmmZwUq!5kHayq4a-y zjo{Pb$LP;#4iyg!xmmX8h6n9HESkzS#F_@-I|C`0@MowHL+B*qP42aseW7~b$oM*X z+9FO-bQJfD;lif5y-DDP0{_c#aKRbbbi&~bsZ@Iojz>&Ur_==t?s|}s*kCfYD1ApvYH{G{1G~vmG(Ri%VLvLd!dS$>q zS{sQ)a{thSJpV#tsI}oJ)wI^>H`vWymtfNEt_uO5coY>RESK2$c#Fj4|U6w|KR*jxE%>P{XE@Pd~kE-D&boajyXS0N^lMiIbT@YR;!WuMPAh z*~)Z9s?qC!909@w&lK! z$o$(e|sCBu9XI#8$&-1_ABIM4?@35w9tOiX@7I{GH z5ccpc!{h&XlA?R!^-KC@a@K&_z|b2`q!WOQX0W{!I~sfUl=v98B48C9zd+9-vWw$gFuA{$=W%j(v?& z_jbN#)mo$c&bYd?g2JpN{O_Suz%6M1puwx3<9s}94?miJQ@@t60fhDnAP3!F^Aus1 zq%I^_9dt&5N1{_~Q5Buuv38plS4p8CrOQ5iq4^9mmBi&0*F#ccEnf;S}p1&{WVI*GHpwU3x} zR9bX}gtYxYJd#-)T4pvUyLe%eGV_6$O`z^W6@+XrN39XYhHoD}r+Dn`Ti^MEXt7%L zHm!UwYgV`Iy~;FyCEyi%ErRBwr)zH5cH!SXwDFQfoms1`0q&`2MiYLi{usxwPqJC> zW)X#Df54a-yIFpbHN*8w0qO0p@(PQKkzW@P?Lb8>G3rAy3`c7D(?*k5FtXHu#bGwl z1!KwutaM%86bwYPLpdqUmwhbg4QCH4Fjaf~()al`^`K|fjC*>dBd5zhQJRgll z{6+H3(PJm*gw;n{>5` z)9IfFcRh`a@(;S9=!!e4>XxDu_#jY77)4--K(o;?<=GJ)0btQGEf6uMJ}YhWRd5RO z?ZP%S3))l#C%M+Tpr3_ps*x#58|C)p?bX8e`c3ld^QRHh_$J<8qvD9xOj20BRC2rg zXE~3T@{VwyZ{el9&tLSKk!wiYlgw=zdhNq8mm?8#0VwLXNryL-aJXXfhY}HwGZuBp z7V-so$=``Pz2T(fj>;cSguRYL+>x(-cOdETgcFhrC^_-4*BOtwRQJZ6o^Z_RjQ~@K1FU5_M(Dif+WWv2%^JlrLtK?#$AM9(c2Iy_7LwP(YaHrBwSDE@SaKe+PW0zbVI*0Bb*zFcu@J% z(^c&Rv?PO;QlO#)DC&c>^jlE>JM!N!;J+aL;5(G1Oo34swFeZ8(Rw1(9bpVM8u50L zff5>#H-|w8A<%w7p;sJS7bf3%JW+XzLQ-K*CkC%v|C3N0zbxWW*{^&pKPgp6bkRD_ zI2m<0<78@7@O8OJ#+E`xSAmS`6rS|Y$mrvcSzq{HdD8!9WCUQY_mR>6N=0RslvJM2 zKc%8qKtnn(Cffok3S+e<9P$bBVB$_Uux}I|5_irgz-QtPXZ!$RCZL7rm_jmf2X(Z{ zf4p{02#X@)a@Wwkb%1^;&sG=UWHN!$zw4Tq7kqoP|@{-;1vqn1fE40Qx_R&)a{A< z6x@jL9=hh2Na(Cs`)4HdiJDK>eChwcB&5+r-$z2C z5R09ar~d;hN>Qh)a%~${b)Ec&id?gSd~_hAS18k9ne)4%FDF1miKtt?ggk|WLrR`{ z2Xp~emc6tVB^!0?0D7MmylF$N+?b+MM!S`rG;6bS~@ zu$n_gU?)UO!jLC{wUd zU#ytP72Q!mI>Zzb{MI1Z2Rw20OOG8Vk@52%CGqvgAAjlUYXE}J~O&==csCEbca@W>FQHU zkAH=Ghy?9F_#k(HtQ56cG>sgp)tlZ850V}i z6-fX2p+yDN4@P$G7?npz*Vt_Rp{As7*4~RG^a((e?Bun&zbCw)ifH>m#MS@pv%aY(E2^iOTc%JfK>0IYqNiF) z_*nj<$@ZO?r60KD)w8f_lv>yhG08 zje2cCo5pNU`}kyQ$Jk_R>*SJ_*2&4%?ppeLvnP&!e7S^59?LJZNe+I+B`XW$YWZft z;}NJG|NAir(SQ0G1dzX8lLD}68%p9X!^BEtgBq@q-x)eoCKNUjTL(?zo?wq4fWQ@c z!emlGRd8Y2>O%5EIplye+mwhGPkpKxDgU!l00?Dt=m9GI?I)@5_miskzDn2={W9QW zU8maA-?6#0>QmL-abgGN|?Ml_?sF@lA^NRL)B;j;p@e%N3#b2uslp-H= zfGPrMAYr{iLWF+uR7G>uX$U3#zd=Gu1&E3|99W>CGp~4sRmRI!&R%AmA1VJ`BDz2E z3F^zd@h<8u9kLG*gmj#>P#^ik!|z_~6YeL^`hLMjy|CbTt5>{FL=qd`sMTkW-~Gte zEn}lww~eU#w{III2QW%_r_Vosn!LP%iru$kpNU7rJ3^gon&0roecSi9kv+Ej4(X24 z{qM)~x$VP4J9Z8Z?btRlyq)~5(VzZBP0Y}y`8*((S1WMKH!6V3cN1t$iVur$tCBgd z%sfuy%bBs2GeKfUKJ!S5Q4;N^05FRnLNSjFsW49veYE7E;c->HjD(B>|KfaQi4zAY zVmq+GUhoX4FrQ!eGPJdF;=1u(josvU@8tOY zy^{qewXAQ6E<$ye`j@v3%$lT?r(R5}{`&6b#iiECskZj1C8d_7Q|)b2i%X@&%i7zg zrrSscP}`I`gro^kwmE#_sqc}7_+m=aa_S{j)7ckLcz$sD1$~S5PcS?78@#;VDSK@oS3c@FR~)_mHalc5|E-G@nHh4ErtOE zEh$eQ@H%+hfDt2>EW}VL1qK$f7ghGL!-pS#{P5w&o*c;K2FQy(B5(72`Do{^+O1R6 zaEk{HKNUH72hUayM ze)-Ke<(H}a4}VDI<=@}7Y4a_&Y~FO+uE!rg|L|jv9VU0FU$m%x?C@QQFx9Yl)3+8! zr@LF0e2do78+%wYOxvFty58>h+dm2fbn3_FZvFB6+urf|hON8g2mdfm3$SX}_RmZ5 zH>lzpuOYRt2=mCIzB=6EuW_$P$F!MC2E!JPe;o zFr*|tRlM=UC`fbyBG|A}NEDPp)udt=PQV~prt%Mdt`x8)7b$J1U;#;^4N);jOp8rL z8}QE0z6wF&Pa!%)Q!%J-;C0Pxos?bEm-p>?@8_#(L+u+oo%L*y0SJ6Ihq{+qsfl|p z8k8KZ+`Y#54?GF#=}h2ue>UsCJ&>V3jpIN@d7~bPvG?-Lk`#=4-u=AX@twfTYTcjY zcB#ESREa5~tdm-<~&7KT!`!Av?DXp2@|R_-(6qNFBPtE_9 zwpCx>@pm`tO^01r1cy$)u76DTHLwxxpG61*W*;s0Vq)Y~C7m(;(-b!N4ZA6#? z{Eh-~|4-^2d1|EOTUDT{g2ifyH${P4Nbb4^C0rZKCPspnG_Kwg0Wq3QYhIcslxjFK_ zhS@$$8z=TWm}+lJUz~1hPo1ihsZpihYOe_|@Pr0yc7dz~MTuJvm(3->@wg2W2f%!# zaDg&Igu1JM`${e>_y+g^;myTD9t;920Bq%d`1nE9&4$ZAcpV7 z?+v+ZZ`oYRn~6FpAEq91TgxY`Zt7{5?bqZ>yxnVcSH6({T|NSI{UdizABnl=r(8w0 zWJHmOlCj{XA9lx%Oy4c}ycswaG#Rf?VqKPh@nQaFq-*(;g3~F`7fj2?&VHnpEI7WX z+K8N`mQL63C>8lfeN<<|wqHW(L>vLXP1vjr)Qq7f~bOHg_ImVemrBfK7x zS0|D*^dO!P{2mex3&gz<#0G}^#VNwXiDLpUr2%{v2Y3m6A(i zu}Z7THCin|`p7+kTGAb9BZ(nTQR3zY8vE~_J-FkZhW@)|>2qg)bH&_6>9%$APtSh1 zKX>;|`4?CGW>so^Tk7KP3>?h$En%oxwL`vwS<;s~I3PIGQ*G;0tA2CE%n9n^@&%OV zir>tp*0nWV{i{Xi=K7Z~@<(*kdS*#~?%YL59rY)S@jo!SpDSa;yZlRx{r9bluj`UL z>bK-=o!2jZOYNcbl@rvhI z<<{3fgYu!Bry1rg?URZP@q^wAs+vKgvy;pk9rG6QPDFG!wlI=2iClovZs}YlWycHvI}IG;hiK-+JFnIR1hDsp`!!1%JaWPvL)( zjaKeec|Vo@rMwfr?51B*-^YFDQ8D2g_~puX!gMq|Dy|#-Y4~krX;QvFXZOr4keKq%j zODlQ@F~krZJBCd+S(u7XyrOg~mY6tEB+4y9${!Lsjc5>= zL_&UO1xyTPAAD1Z83F_Z({Gi9L6Y>_V}Nukls)` zq8b>)gK-(0U1G=@HUrc=YYtmO&gr0*31!E-G{shnVROd_D!6GFl~&);s9)bYgiK46 zY-|jwRr*B(bF7AfV~r~2JBD{%yM?0ee12JXV-w;LX)Wp^Obw3!+mDVm;Sv;)8fssq z(`N^asQiXLhr8F{vF$r8GEeJ<6|IB_O!6SWBwB5i9VkP=Pm2? zV76yARpn!J3ko>%&Ir8q=8e%lul>>~j~g?Bx!>2)zAL0*f{~F6z22Vi<{O=T%i8na z@aj?9p{0OSRPUc~)(NiZe16*HXbX?+qd5<^^bpI=9!rJW$7X50r!&0e2En#?lQwC# zR43H6Olr_i4e-K^n@f$(^9MV(`h1JCBZqCt%+f=&Q8G>J=h$SfJ|Wn5O{g`8mb$Vc zeS9`Q?RV{8GM^Qe*H2$Rf8txW zB+^|rZ>z^B(AiYyy>rNoojKjcyrNr)sstZKMagFgc$5+(0L@WZ9wa0eS=fPc#-~E= zl;^H|C@W3X2MRR`#GpJ?@(d9u zg?hJSX_h+%@@~FTWv6n2Yz7t@c$DMHjg-qOYm;oHb>H81le|T~XY;?Ui7jeGlH$!T ztcO~eetd3)b=w82>rp9vC|lQhs8ujV4LqwBVs)k^UX#mU8yX8Xw|^*&GJDx}PqcZ$ z65f3x!}_cNgq_m#wu#oh&+fhWVU+bYC7<4ZKFF0D#S^+rn^;kS)Ggz=9$blrQp z1BFOnynzZuj9gvFn^kij%kICdNFN`${EqvN)n#>R&4t(h^v;8y4DGw|fpthjKYroz z=?gASI%>Cc@>B<>18U>WEq%GA4!2Rhq*O|$TT7Ywm(B5dujHSIA=%C5oC>5n)(XgQ zU3|Kkd0lrD`A@&CnTMRUQw`XH<6)n9>_sS6gxj8p_b3uNsL;e&S|B@aLI6s56PUTO znyZOR%C_B=$Jp9d zr3RO!6ZJ+^20-w$w?Ex9Hb70^P;a!x+XS0xZY)0ZtxK@$DJo_Lcl(L{>W8d8{0SG^3qL_Bgal0oIT$xS`0^D+F+rFF1}!T`Gu6V zxG&*a2cC+wCZn1n@65G!0aaqWn4LkGkbG zf37)Av6NV0xo0^SC_Nn3y`PE3%QQ{)b5GnK+lz~$k zVI3$WT&|y4Az{Lc6)G*$InENh7nWQl(Sam^5b+Af7h#_tTqqSeOM+R;0j=dmWK84+ zxU#(Xl%dAUfwdLwteS2@uCQasl{E(x%D7ThxT(PWOl52@MV!W=wxGpH3d8!@V#rtD zO4FU`q&kNJ0z(p&2p}2Jg8f`?xa(4&eA|FCp7w;3lzKx%TPUtRx9?d-lX=;^e%toT z@}tOgEgp!3;+ANX=S+6DpGrgnR=*`On3|d6w6Knk*tU1Y7z?-UY2o~f;yI6k)58BthR`4Y+D%BU9;IG-lQlwV-Di+SKifb6b;KBm{>eH(@=L&qmZz+A81lv z-W<;hlEGsUg1kv@u{8#>%~p=r@CHdmxg^on=x-Xf0ExtPcp%*4RtpZZ#JIbyCYwss z+X4oCbD?3XH(^C4Zy={r8?C)9Mpxb5zJays#(kTcmz_i9JPgCu+Vm>cA)1VCfx6ve z?)23s!!SQxb-GJ`x6X$^*WY3fAz-sYl5@2vtVwtsd9M(y!as?TutMB%g;7^bh=g(p zlPC4g6nNfPoah8q=3HgJD5uuYfpBg@&qSmsEV)4Fzf5^&*sDUol0u1PVlIoIvN2wf zEsBys8jz(@?&}PHMmH6TMNq9m#}#5F02t+k2e=uEfHf)h%*p_eo>>+>L>nm|6*T1v zwN>e|P-oHKy>qIio2K>Zu7W$McZQl<%>bRWFMaYH7Fo3c)DZ8Aq$6sBmWmr$y1$!dDlqk#l}< ztcdD3_4i);LOkW>dE+s3fHHsLPSK6BfQOAq`|>C zbJ}O)ZbTM%U19b7wq*~mH3uytw~e@3yhH1Q0h7yCS{i4=dYyaaMk=)=xq6AqWO7Kc zc!Sjvu<~rfo#_ZL7X_AG#s_#YV~aL}t*5!>L(4)V$(aLvCvrCX^pk6s(CSN8WG|`r zwo8ud_RQVq+cMD2*BtnxLF5lIbH%qx`Bc4KgWY z$bOn!NjQ))S(v>DIbD8O1^xHb-StAAtL-kWUNzUORxM6@+nNA?!mZr3onBMt@fuNp z0(g}D%QlsOC!x_;+3t3pzBbiVVt7VP>s7#uT6&2zva~QRimmpOr3-ZtWW=$mcU!YA6&Qt{%b1ZvGyh?iVE4w) zv0E?q*c@T6Rf-^6M&Mk$y8z_U_C%ze%EzP-O_{`@9h^bBbcMBTnMX3R4v$%IG9E*2 z5tThUE9l=muf^-tEDn3<^r7j+=VQZfFk+uL)6xScA+J|kliEMAPUG0pOO$Dbmu_wj$Dh zQ0=&s)f?xuE%#nL=m_M(4u8G+E{m9-v|Cl(HziKUbMiYkpmKV?JsD}u zuZUQX4%Hv_q>MVFi5(lYMr_u$;5i%mI(1q@ax~@lw)=FJjt+Ccu_~Uab%aa}n+hI_ z-%R&3rVdTC9-P~_-8|FQwuGW6q23XQH3=@-VKSp~JrvY{=rfwvtv1a)&I&CQykrwm zdMhx#Y%KEhjFa66C3m$snqf|6DrhpVDx<{4NA>NmxiB3?Ga$r>W&UXOJ0q3>(a?y3xF`JdLw8g(cGT4 zbHF&)3LBp|Eo*9MLw$_WxxI$LQp*a_*YcfcDIAEF zBJiKq$$vFib!$*adtc2l>S5}6>bul0=n&mZ_t0YtlvHKoMwVUqAO2u5Vl`p0#Fca9 zkJT;T*>Ea5)@6B_l@}<{J!iL+7#B}VeY#ckcOp_nm~(2jj#?#N3wIDkEpjUAh69Xj&h z=-v*2543gMa-mjp_lAupwVfBYJ2&;MyTiZg$;BJ4)6;BUwm5d@1py&qad1s*qVn6& znxqVB1vE&^s0sO}ZiJEeB2?O_cP6IV24_Z0CX_o891RYZrdyK+#!=W*m#gJDhG{)I zOm{RRlSn6WMxClLVTu8>!hnKq#jv5a_R79=zncQXk~DscMTpw;tQ`t~@f>*CLL;PJ zdn#)dQ4YrEZ6qL-d>A%atCe@h8kzyoNTcGP#jfAf)^$J zHW|47Aj|lgwEUV@y?>*RceY6Ntrx0xubX+~!hmV{7cBid@96f7?d}_>YkOvWog|4? z{YC3X)>s|Ej^y}zK`wxznYreGp5-=m+@lB1jHbA+H-2c(>L?dIH7N=DgwDQenhlHk!WKvb|*286=b6 zs-`SgjJA)<|IO4Ro+;y}sYdF2UPuJ#m(p#nr90}=UaKcH5wL|g?~PNY2X|hzaqiYU zV-q`z7suT99T{AmS}6#TfzNEbn;O_=3|eMR9vIQM#BA~0)wYaYRAsp%V zwK%m~$LMcq0NzsXwp!fTF4O@w7u=}+EE=4JQ(sJrrIjo6kDchDRT>K~H8KEEr+fEy zc)d>@u(aPk?DIu6f4(uG>$!0%&>fH*K_rw0dqZv40gFjJH){4~ESiavNu9wuRNt}5 zUw~>2kUDT_ty;yf9+NQ$CJI~-GvGl@;LNN}tx}=5wSl7501Hx4jKOR`x|Yd^7!K9| z(cCyR(;>uBgk`4FZT6rzidpQ4iv~96A37K|o7g3*+SacSje1=|J$X%o-j#H549E4S zItG^>Is9VW;jl&a>}l(prK!pNAAQ)eeA3Q#b%$U#3=5gj%VQfm^VCavlaWI~jlsdN zy>*=$C?OUmys=9(8%(gm_vF8*vvAtli<~w~OYP-z<~1j$mwI!8B_K!x8CL@;e(1sj z&2j9P&Bjrc*NW1l)?9PE(Q3N3p`=269s=XoXi4NJ=?OUo9M6 zNPU2OSXZ+ezL}HMUDUIX5C4rxaM3Zk02wh(ucSB8`{)blTj~4h$LY_}U#DNCe@6d~ zKBa0`O{>rxx;^nIm4y;RXSqwLw`W0o%Iurmxq5%JD$A(ys49&A{X(F+nXE|4ig+mV*oq{m@M;+8 z%jQn<8>)e0mETiof?7oQofTnGvFIpM(FD0u9e!D0l=-^?G8FiS=_z*?Q%pmc0?bEM zU;y0;Q@J2N>wN>j)IkIh!9gaO41|cfa*A2`5yZ(uE6YDjUh&NIp#P?YDLo5Qea@8p zBaP0ERYcOyndkbi=3`+$nE2|Qu|29|ho23DFC zfxKQFOPRWq>8ea|Rk)Hzi%EN8Ux)$)WW{2Gni;+-l4S{3?4xXN1P^spR0C3?bZbSV5LS& zEOvZ+Q6Ht&O;LVyq1U)vjp_gZ_2?9*-_p^&8)8!xOhL)D0cy{}aAp)KWSImbdI83) zGu_jY7EvS2)h2a3bZFJe2l7cF#_M*T6Z8i9c70;BZ~f?GzU`?^T(fAm=|kDog^?B5 zIVA$Mqw^+ z0OOPxR_jHfkhnqTsr$5r@d0uf_5&B=h`U5d40pzC&YYe#8(|%^X|(3lr#Xh?6&qL- ze-oV%$4;ILgXSVSfH(LBB(hTUq{)zk@tSX-21ScWa@$NC^_t(o@?IOa!|pL#L@$>x zaiY^Im<4&iADApoffuQ3;wmlcV6~0q9u)(aKrmzrhS?6g+0?9ViVj<3k^U;^2!~xwgx@xGOKeV$t=Z92R{*E+PPIMuWKv-z&aOuYR4aXa*a{DUGiKK7Y=F`N{Y=zt(* zYhB*tu{bPdkA)8KHW8)3H>fsSmptnOPpbLfJ>k|!3ksFBDY-n*x5 zWO>K-&pd6OSt6R)^^MZ{WMH_hR2T0Jbv4k(0uBwzX&J2+fa1jMu8jO+55crFNCr@D zT=3Lx0twrFNt6j<06fXCteydWc(yjIH`j5j*}*Z|MC5su(P%WNG$Tcc zPraR$YK@>$R>jl@X4D#Fk3hfC;n&0+zNlPFOBQFn&*q7~z!>YL2yfPE*d;m#CuaSK zR|>V~UA&Mq@h`bSx-J{%n2ll$heVUGN^%PpXV|iWce~8?XqeiFEE`NYV+70(oALpl z#?6V)s>Q@*8lDxxJm``V-I9T`(PrcPendS&39{5hRQY<9)gsk@)9i#s7!h9yI(>zJ zGnm&mguPi%Y(gtvHchX&Z24kuy@0Z_ma(iq%QY`QFBG4K<@5|{van94C2ccub~De3HF~O9KFR!6_aVepUV!pm->i9~ z<`;;Os-t?VTVkb7N?GPtB)_uxgDL_jf4Z!}fK&T`yN8MlRd+rCE&xSzk8EDrV)EE* zaH8$Mb9TqEl|S8K>ASkYI+W~WkUg3-dr0?4O8@m<|RpkO=os{B^MjcJ7RS& zKGL_=?a3w#!h2M0Z7`6n3(#z!HX97o*2=Rw4(U{GyH)TS7$)co#*OAQa%%Y~B6&*n z;h@G4jYr#gmb&#L>+78;$7m3;QZ}3lGF;3pOWXy&4Fpr4nNAq9DKBfJTGX3H2e%nN zy-{%aZ1`{Oh_}s>S{85p{?%(QdGwAtYVHTyt{R-XqO^S1h1U(=L_cx-hc_<2;=uav z%a_;ozO}}8Xq)Qw>Dw;bPkC2Gu3y@|;fE`SC%-N4xK(4N7XSX%k(L8DKK$8$J+SB3 z+eaV&deiSpx4p34DD(sa)=qaM+IN9ot+BBFRIb2C?m)pjnq0LQjDaz`dm?sMBJ6aF zbz5iM#yVH1f%Y7r&MQWvg+eS+RICGduy-Mv#)Oj0IybKs{O~37F-_1OV6}Xs=t}xn zgKCg^WpmIYHM*SH+4Rz`HF=ck|oAz{2}iTye2SNSv-Y9E;U@CCTP7Ss9~^4UdUFWIvF(AN-zsRC4hu&=03(ZJaeFSxMmIzX2$yL}2j^db zVjT(}PM3gYW`p2z`B0F}L%y^16+aHKDvIi<4{u(V_!|3ULwXuX4I7 zlawu_4u;OkB5c4+H#oZJCX;bfjF!WV^0YQpe!{gY|>;HiZZS2lhtBUR?VJeKm~QN~Sf zj{{{^u-lK9e@_857I3Pg5F1Ebf@-3%jtW0`*3d&a5+OwHE(%v-J%pryKz4 zH+%H1I!|q!MllDoN@WeHj4FF@Vo0Q0)dvL%RZ+}(L`(#`MUy^kO!6jH!vJ<%14Iwr z63ozgC$C3&KUZE#+c+Syiz2GI)(U|EZ)vGkA9scY8@4}&rnuE5V6Rb^tv4X($ZYj; zR=-DnJ1n%n`mw*>V7B--KahyWtWrqubg_(M*SJymdl!&;SgqmV_RXVWaK)|WipAxELo zp_=BcdK1cstJEZbK#dA6V99*YScmc~teD<5v|&x7k+tbH{;jE`8C*{^s}h0@k%pS7 zL?YQ-7~>mtQV@9lfOqzr&8Dc`$OIV9lXXiiq8&diJlbezH&3bUMknF}*L?{)skOfK zfZxVu+KQ-UfM5=IS^5fzkJ;Q=cY~d0Y(~_@Yj8Gt8MU7ar8!P*QEN~_NpC`d8GBTV ztv*G{1&A75%fZeM$)76Ny?>!LzDlY2hc-=LTx@bR24eoC&oe#CaPXpe*@NLj(>9As zxZM>l$v0|3dQ!&Hk3vl#R4uA20pwff;T>k4^&44tz}6b(fQg9$k90?b0~WaG4YY|W z)Um1!8b#LPH>jR>^oY)o_ZuQ>bh_*u^9d6>sR#75>qrnAbJ0Xf%(zdR^)6jE5Z`X2 zI;nM(V(7%Ggk7zo3MhOb{tt=>B3u-g6>*9a1py3B;mu`+N?cSG{|h(_^pmsdbeYkW znGP5fxs{@RkR!qzsthSkwitvh5Uv^y@$nc9^C+pbs3<<@QHi-ZYZg>)LSPOsGF++H z;|K#fBSI8-PQvgMjtZ8c{KNMZlNI?9W#)V4k}{|2q?O^~EV;YFONrQklb&==rV0@U z(CQRWA4QUoe#j3|w9e}N$WW>gIpjWi*V<;(!J^e`rdRuIt*%0^nO4tD^{hp3ysEKL z1$dp&#jMa6zplNpPush?b=^vfZRL&Lk~ZKq=yOw8!hELhXe*74)1% zNxD9qEdl{fqt4{5NLQpxiMFr`Y3V8ONWr9I3#oUHS~*`yV@e1NiaKK5MpOC5d}|`+ z7^fbpuM3CkGEtRhm$q(eVS8h{Vdsj1(G(3ia!zkgaBxLWpSR?jJl80B7scB8rVJ=8 zxpLY1p_>OX!N!()p^hI-@4shu_t$!D8E4B9k+Ehs#5jZBz#1=G0XK(s+xb?rWBJk0 za+Ox>FBMigy)%6@aI#g}_O#yB63}+U^qk71nZMLsqy~O|UT2J9EIyAm<8$z}cB4*X z=38@_*iufCoJG;o0^fP9G4{C=oJvPKR1;^1xWZ7|lLYolmdfbbXH= z%Vzr1vEmJ!n;Yq{vh?Qlsgo~DpR4aAp7$oq3`OI9Z-NO*K0EblF%io*1>+@zWis+QeL#0!%^?KPhNxO- zR6sr?s=Y${$SYm*M`N4jziW)qy~9ogmBNSR|QeKd=A8nWupl3GfNh$ z*J{s~t9k3n5*&&{RmV^i1t=O7U4uB2ac5#DL+JLyKiO)5Pi3`X&+lXXR$+ffn_Lxzi0ijX|5c3}ZYeITi(y_A*# zZ&fpt#`xr3S$>A)Z|al+*En?9Iy!n`S22*@oTO>_OY+^$?#YH))C=@vy7u2EsabVv zu6Do^IJdRf6A)td#Ar}#iiaY4rsam~0|wLR%02zN9^1tq>&i@ww`(mZ#k#qNbz_U7 zfdFULSgzdl&`(!@vuix7pFIDfIq^`DwHw7=QX83Xy|GiPyYS^lkJJsumaW-*K24o7 z<)`*i{cljauDcP5Dkc`R0edQoS_O#7(VAxA4l_8kZ$AgL`LLzS30bLDv8~&w?vO(Y zg(OEv^@*a|!q+EhSeMK`j?u@htRb}Z=tEcdBcqX-?LH~o+T|CMM*w zNIEs^a6?HjGX?`Zio6h!tX3n+D87rjZ$`D>ZO*yEw6Q6iNcc#WE+RkQJWcEOX}?lq zfq#&IFa5d(*!E{_LFHvp;ns>Y%PZU+QmL%Y;$2xsN-P%zb1Z!S>iP#a2z)BG zCRw*O5ep4}G?H7WQKJppq*>{-FK)>VK~ zXm1S(@IIIT=51*R`8$Cws^*}Gww`VbhYR^gxS;v-D_h5M-IBX-=fpkhse?bdeePpV zc3&_vyyet?9@l6$e0P0xC@LgutLhs&GAk*nwqMHFkBJFePa&5J_d~V5T}!zH*3QJn zV!KwvyBb}`9(PE3gi&bqtzV=<536?Fv}EU4sLO4Ddx$l(^mc7p6+5}gJ*Y7Vbv}(Y z;p22Z_rcauD%IMSkhfY;^@`Kvvf0-5LNu?^xva3x!xU`u+tsjI1MlOTt*xmSEFVwtL^LowCYkpt$GehMe44kMk3eki^C{G-R%NJ_Y8G^!I zR|WI}!c=h{{F1nv3D3iJ6ZRUCD59u8l|5I5#e#sz2@+P>07kFyOG45*Qf?I#2Sm)X zlvAjxD@r@H!paFdS14a;yf7$!K`}znR#_fOBS{fBWvvTAm!S3XkI+Be5izKxju35M z?yl?HBt#ms>iVWY9fqS3Tpl+)lFBbq#XIX9Ef($g#o*VB;U{lDxlzM0iGk6H@q1>x zoSE-fV|Q}_*r@Grr8CC1eJxwheJp5DYoJ@1P-UW#r1R;Fc7Wc$^Mjk$9XNc$p4|s% z!}e0#lfS0aF@K*!v|8H%oNqC>Qs;Mt>#0vS!hzNp^4HBbL<6BxJPMJz6>dfw-bJLczYHr<9 zUo@3!qH3ObfGQoL>FxIRur44{p-{u9*Auon!)}kG*2^{7xrn#W_SQ+*hqQ3%Vf40;G${x1%L;jgdh;Z zJPxZ0G2vfjg90d1#FIq48v%_0m4VY$l>0M{#Y*XnGi~)X^Rq2CPnx;r-f(7B7ya)a z`{<8of$s>Rw!LNY0*ykj*=*7L=wlDR2KM!*+|i;FexPPwo(>}tSl_T$`*&LR7ELK z_69&GM&s(T#NGze1&<6_v(Vj?b~a)Gt}M3d@>G^8vD?6+OS+FL?@A-&rj^$-c)e1U zL>UNrLbhR{v9gN+U#SR?@`A)tET=o@;yOIaGacmhBDM!Z1BhiG{lwhH6ZW2KJ67Le zH632F`*Tg)(GXzZ9-=z`NOe%jk8DxhkNS*%eG6IU@7;Er$)GPB?yc`(85`HWJhu61 zYV~8KM4&CVa&;Z==Ed3;*x-%Rot%Xc2b#u?g=kuJrB+6El+_PkTe5dU|AY{=Mi$FA zY}ssH{m$I%d^^>iOtp6;Spy#hKlW1^gT<;fsF9XcH(!@*Z%-!MDI01y>YQFg^ahNu z2$>nHfqkx(wR->=iei4tNX=!==7twEB0Q^}$TRxFpa#Cx!P-KHk$yYUaNyX7myYhd zaOLcjC??b^RvmuqCqI621yZ?@WPYjq1$p<2-TvTbs6!NU(Geu3U-Ind<`1l0y}AeJ z*z}UN$l5^Hb<6L1r2Uh=mb#3gH8WhdA=AEXrXYe2Q7mIIO~(RblT*X{kWt^1st+&n zG&owK^_Q*GWg909eoJt0_~NxKx2(!0mh^Q{KU1k|dwXl^x@n|+S?tikXfs`R$`1ps0o8P&2kOtM->>E?7 zOv205j`{bfZQmA6h?s7flwU!WYJ22r`7?K3DgWYIoHedm{}qZkNwrXyz4zwiGn+R* zGZ}sEx+|We_Vo^?>lgJlocg`SL_tk!Xw=5dTz$T!gQm5oE=KsBJtYOxR-IuWl^USN zj!e&8VP;MH7Ip4KU0a!HiN#9IiDVP~ig$Ywi~U*WpU^!znVd#M>HxP$mJb2PbgEES zR8&`6TvP{+e40B76J3TfQE3oZy&?`{i3KkIh}t-!x42?VjuOA)p=o3DVq8_j?N(A+ zP>|*=EcB*Dj)04<6X~!7vkH8c6oNaR1-ezbvvrq1bpTN0C|It4VeZ;gm~G42SzkwwStCMLMOS?79vOn zl`IQ3ba0r|!BVQQKLhHOIbmK`a(uEOhE{&6K7G|L_36*$?9S$ZHRdQdI35m#_`>&V zHm&tPlF{{b z!9aaO>5BS5X+wP=SV!t!YImoEuO&H`U4F3hLN!+14z598KXIjbtYN;+sms*CM&=yw z`XIx|RJJZVY$wES5OlC^hT8qvuv{$+rlOk|Wv)g9H&;E-fJsq$g1^!77TE>l#+#p| zr2oWHaHsM|^+t8Kei^^KQT<_+M@&{GKd|P<`<1Xg_Cs>ZBl&Zw?FRM0!J7$vlzmF} z94{tq>P&Se$;y6Q-TUP!v$Jn>ir-0 z<*E=Hd-Kva7~MIk`$N(~By|QCe)uv=Oyc1wQ$AGhRbL=ildIKx4qxtc!|+MG?jm!N z;Z~5Hsw)CFJvzRWpPR-*X^2J~z5@!<0v>ZPCD0||evV*HiC5^b9d;RM4#Q;fP0J}+ zFnZkj5qm>RXWrtw>9{ZU?fd%|d-r`Ibf&BKe%Sw(VkdvQ>@YjtZW?A_H^R;vahS>U zt%<#tyQVF{?>Iu*;0t(Www&5O?8`5&-+SbXedLV^)8RYDFM8wBM3J_kDJ z(1}B)ZbP@`052B+XRNLVw9o}$lHCfRqZ{&2H_++fih@ArS;G$i297G+nIxbMAMFB^ z;TUxkgKv||Vj(S%Ab6qMm;igP z*le4T3vt-Gxin3#ddX1w`xn47NRPo7Nh}1Xc8_xtk8_jL!%SMj9bT{FJ7aoYr5A9vOYVY1v ztM?ul$|p4jvZ$Mv!CxX}QO+|)eS!ay>ut`2`0m))H+f+}4$-Yz<8 z1z}n-^sgK$F4Y35;|xriEO?Hod{KMn#n=rRO zNu6ST+4x6rER_PTsYBqQu2VPR=N^}c!nxwPdp|&IV9D0OKvy;lJLe|AGL;1G*AE;y zxOwZ%;y7jG^Y~Dt(4w^_4eUOm2)E5Is|WjEQ6EqrfZv0pkp@T;X@cLzAMN_Gant+M zM)kzi>ig`-vFE#{5lZgw+fBURB-9qATrjoyfYH#u!Lw;xt@{j_{iD^CnXcbbKDtE| z3f)D{_R})oleQFp08?%E$lRK;cvr+A^}e@=W!r*Irh4uA91$l-+#~ zDO;#j{Haj!U)3sp4Y)y`3WoEhpH+Mz!A-!x^1-ly4$NSVQc#3Yo8gz_Y6Ez=ejF8Z zySed^0N4co^L){6i#kxtTDJ=NRa^mSyx5Ru+L&%+s)yGTXjOc8I2%R*|8*<*{=(_w z^kr+*_peq}wedKaT9}jC+aZohjZ2Sf-*aXEzG8QR3nmh%UY`yt0qVVF*T=f8HgN8l zX2~uy>DP9TJNEo)lDeDd$bCeozQ1QYP>x$(YhB$IBbZ+($*lUlzp}b!{(1r#YmPHPKM%Myjm0@OwSl--A3Ho@2Bw81O+K@ApE~2mtQMO<)CcVWM`l`A z;n>qAOaVkU{CDIql;af^Gi*TT$^uBf zAz&y-0v?aP0w7oMC#U50t>H96K)}$SpX|-d1)h$TgMj|M9Hh+%WDKS1_ZlEAWT#gJTF$c1HRdKoNB1pbLAT91Fl46|FCS$h0uU_fCw(FYdy z09%!&+ah&^4pbVbF$1IT{84mWi9Y{#>kUw=LABxkxB;Y96Mv*9&Y>on$|HJc1uBtN z5-Ktb<-u~xJQCrCIjV-#Z`7~U67@K^VFaobs7gRuHCTyW@Nug z9OQ$c0nWz4Ya}??B|xb)z+VHzS{gIVpJA2mOT?yrp$66C;3-m6k6=Q1G`zZ#Q+Q2k}QhSgihVu$(q15f!scuf(;fI>V*|mV#NUQkX)J$N6fiKJQlFf~7==ncN zYRL0R4F#Ob1DJ}qoD@G23qkd{7;7rB!)*Okf}#K0P=r;=nCtk>8ug1%lt*=>;6H;R zpkRSoG6Y2fq%Q3zK@q2of$F3|k(;a=graq!+&x<^c-?C2X>OkNM1(e~c937_o+y4H z&Qb-nNY$xt{AXC?&d*Hs@EXG_Dd!i8AQ!My{S1pZcalI!{T3<#A5k7?G-$_u&N>8&12ke1pb}Ig*kld*f~+7z z(3F`v2ud9lzofXzC{blfW^=sj7Ynn1Ev_8(e;Z zuFmnMX1u2XQ?`ekg@k;R9lqeOLBl1i@9z9TT+&RVU&o{Wj_@qHmi+&#Nq)|Kpc&Y% z=<%ay1^3^<5igr)%V-3s4tHKQz!B68%`*BQ!x3sXs7FP~>Ns*E&?Pp`UdWXNFPyXf zHxMbh8i93G&Djz){r1Vfk4V{GvcA)$k>-_Nofn$js3}l?4p>IfVDxK*XE@RH#D8uX z4cIPVB}G@K==1*!fIx?Z<(x2hN9PuFGy1N1H|iW{1Y)`CF#Z02&={(?BQ} zOe2lV^CzYeFynF42uaa62ys(40zzPeXdsjaMM2bUouDI%K}S5P>9dS<@-;U}fF3jl zHlc8f3I2X!AZe-%l-(P@Z6FQeQ6=$upK@R6S%l1@p!|IoC2x>ZI zuWtcf)7gzxKyjAi^LlbASVmxkXQf^gZD{`*BK_Yrv^A3`%)$;nS#L-j>Y@EgZ9-mH zG_(h5)6YyKJ@~M&6B}5xmO(>%ur{5c)h2EkX;2hZo7w=fu#*O4h8m$Bw~V;{L(2%r z>A0Z{EVFqboAvbzUI!)dS&oOZ$^zAFX?~JNv(BUH5}HYv&VOYl9S4*93oWtMX#Tti zC_RW2iv#A7R+oNm9?geJ%&8f{*d8>Gpk_zFNMm0NCKWl!av&civl|EG(3hbyd2%w- zwnfyWrrv%Ho38qAX<-LC-v5iew#YcyK(8%qhyEZ=pYl%IuhgYzWXgq3+aSmAZ`n!M zY0LH=KOJn*q}*)JZzEH(|Cf*{D?g~!reOZn3nG0s^3`u(60b)CJdFQy8|h#+P<4V* z%U0+A7A9q7{T!2MgL70cFhE!w2#j`;gZ*C*bld3mAyDElnga9X=_j_B%|;VU9Omn4 zfQ)fA%x9bdvu%kmkSzcsBg~P9VaP6&&E0?Vkv?}60PsONZK)j}8S-mme6W!~3!vlE zSU4C6fWhB%Y4;PfNTqC;cXwqE1(1z6IIe{>u6^Rg{=0x>W*S< z99NfLPz$>Oo$;>KqsE^CTX$H8E8gi_IeOxnup{1S9I>@$)d6DGTV1oN#;$bQ-HDBX zaGS*$n^dE2hM|{l)s5tIFs5x}t@_C;yRUxb)vK<2b#wm8wzgII%8Hum75dZn1JBK6 zdyX90LwafFfx3>4{%5R?tn37x#=WrTe=oR+4@rJ{5UfU@$xZxyHIN+-d=Jz;G8*jboR`pY0 zzvS!}x$5Wl97wP)sE%>O73EKU{p^izOwP%-CB&33we?wdS^B@G^tw~%`+L`RZJ1$n z#XBsqp0ma^_NLe@PMe!-rL_NR%dW}GuhX~P*8h@vTkpYn^B?FX6RC8dV0_Wq=VEPf z`I*C)4QwIGHE2Bf+-r^i)o0d-98yo)Nb?iws;^c(%qbo_mE4LkLDlGpgC*Pr zju_&!TOESMQAi%G?@&tWIwc*0V&7_x0lw2XorjYQWJkD8<26#K1bcL9quYczCc5lC zBu_)S>&cT}9{XxoVL@eOLE$jQDhoKzQbFY{SQux~g``D&s{b-|1syh`SrkXKgv4fk z93koi8h7GaQ?fBhTOAXIWmCUNG#b6yIyz|s6r#Ky@-+m8gWF%adqZ)$ueg6lvCmhu z>CTt7SG9|BYpV?YpI>Ry&ZJhhtiA90j%CX_*wl`t#-x|F_f>9w#&(mhxY+lHKk(n1 zY|m_FPi}wdxU;2ImfBin9)r}@Cb56CFI(F26@M2fK=nv>&fe#v^lhfSDAp_B6;3f=XLwg$VBnV7I|B4vYJ;0APzy|eGrYLRKXD&?s!K@Z? zMnysxnYPP{=Z*O+N)7r7dupQlb!U~|U+oAvt8@I-&i>Q*WtB5>`B%3mGqFOir*x+4 zvTn9z!~UHaZqn_}xadr~eG+-4`!ZMm?#!g4?yM~L(WFc@Y3Q6imr_{OnM6}BXsy}`wiZNg>GLen5EHwKKBB5I=vnir(2w`B%=F*lbJ*> z=#Fvwd~S6;XBQ>whtA7V9N>&?Qb5dbtLxkuS+G=97wsy^7u5rT1qy~NoF~e_15P&Z`J*s+Rm=uW18bvmv<0fXb!!mFvv;WV zg!%eXSbL%VPZk%OFUGDQ1rncDWE&g2>=bcaL z?bY46#k78++G+K=+fsltDJNfNB43{Pz*J$l4YD;!m#0z8 zt<-IXWhOe3p^bp+d7ai=0c%tI&=7!!YjAY{ZxIe?Alk|Se!)YZOy&yhfj0z=d{J9F zz?FUUibvnT#0)ffk^yUdd98=FO|Z_7=0JRs_CCDo#wEY{Yt(mInwwku&$if{HY+o{ z6K6I5lfEBgjepx9xS1ot@<4e{qRN)xbK;j(KM7k6^v<+H!H=uNmVKFI^ATL-tVTnUAcPjgX`2k zzhJ@q`uG^O)f`Lb8SLN(%kQV)gUB$CCT>U+1kps2d0u%NoSps(WD5T;Cuy+j8T=z;_=p!)0a7o_OfR5*D0zK`fhEywMx8P1SQ1O*FSN5GHHG;# zJcn7WGlf*?^OE}8lJfLbu*F5lfm{c^-ohIChPA0kz0RGnzNEIcWIg=W6|V>B(~6p` zpJ0fmq`<=A5S(~Cu>U$FlR1pV$1~Q$YSZE;QKh89XBO9l_{KeswsK4(n{N>iA*PcZO?0kBh)S-3ALpdcbAx znYt~|827<8m`8Q*VgYj#&HxPrS_a2CXk2ijIoV5uwThrP-HGn#9evR~>yyr5`SL&^ zC{YChtB(;VxfQO2qaQiAq=A>9-4B^dPF$JHdtC55`bpy83;ZF8fCPuzY*-Wx$sM@E z=l$F!A>PDV#~m*o*UpA7KI?LespgJFXWgxsESIdy%IQx`$|^}s^NB8+T$YiXo+Y^i zkC^B64Nvhb8L!VsOG>MRJ)ps~tn>oW4WxZ3#lD>UbPqLWq~vA;OOofSiTZS}JFCt+ zk*0eJQ&V%YGKm4WSd+Z@g&_io`d;jTDkKMCV{nGi- z+TOLaf*c?_X-NvXym)F!UT0oQMMXdpbn<_6s(Iu(pz@UhN; z!03Vv8}REi=0FYL+ED~goT6LXZ^|ngC_%WP?Q>N)X7Dg@bb;px&sRwN@VQpW0mo2uZKR}w+H8+oF+?$K^js2M|jJWvU|O5q|9&4u1V>%6@o3A1R|MmPVjoF$7s8h8zGMOR z54iGTg}8(qlM6iHjB;hiJJy5qhJ?bN0BjL=WY4W~<-}8>KWEm;-%&j{jy0!`TG0QJ z6*g-pXU8c-(8m}F8C?n671by8W?RazhQw5}52_K(pB|LeFZ3~vnB-WuyEG+Zn2_a? z+C?R<#?WO8rj!Q9!g`K>m>q$0s*jY~;++=Q&p-+$k^3DvWYoImIA>9klw2|VoJtco z(r8-i_ks_D#DEpLqluYTBP?YqH5og{TLe?hs$#=%Vw$QqF1mFSz4?#Vc9mvS1*T_O zZIq_fCVLCrDRjL%A!T?P^LI9}G4*F1m%4Hq^CxX0hWU!2tX8kzsPEp6fco0HR&o?Gk|_lVL0#cp~woWH~6DNQL) zDGhjvGh<`dH0&7`RO^efV&h)TOf4%Zxw@r4vpfkl-vU_X#HQMedSDh8GUhQ@bgM5d zsp=r6v1#^@-y3UBbT4$mo-TuFlv!WUK*mg)Ft5cJD~+1F@cnJj>Cvkv%vr`%==3;r z$pDE^1-Z`SfXG)u>cRg7Az?z?rf)c7`40{J*SPYB*Nnx-*Yxk+NuFc3?lcZPdC-}9 z>JP6$_ig$Pbr`#E2lN=nsGpb)8`E|58td)?j=Ne95o!}uM%=5N%Z0)*hvd-%dlr#YePILS-OiuB7 zyBl7-sQcQkvcmLm;%KRAQtRwwugBeaTI1La1#Y+FV{-9VakMlmy_k}s)Yis8pm9Q) zG$9Q(@dttp$Cp>;CZtWM4~6RWV;s+^58Wr0$Q|L)%J4~v{r7EqamJ;Exx;e1>Lyju zw#4b>^5}3&xm0`~se8fxT6%tdy1LwdQ&pg`F;H=s-2K|0%L9!K!SIgS!xbgX%_T6# zbJ>ZrOjj6v(CbqmtGaZHz*WXI5hZ2QV`J3z2P@plhL-LYO7vMUK;wihV~WDJ_G-&^ zAP*q`?p=ZG1OSLY3(sw6@zAQ_4~z`HwF@3={q&)HgZzfaJOlZrJDsRyDTIZ1B1ORu zu&M2--J$8{Y+qAs4bFaO{u3vjUN-f0qQCDU66&roS%`De4RhsdPwZ6RQ6GU7lWh|} z+Z=cP?$ZPPpB;U<_b+D?$J=*(Ob*<)$|J+p_e9&`aWni5*jQwY6Rgm+GMQjAY+|;$ z?9A%f7l$%k$xq)ZUL`JjP)r6t;R$O$n5S-{#g}Z64u+?1Z!j2g*G+0XxACl-r?a5z zl5?Bao-$st=9|(hgS{^uxqah-UEwv;Yk|;vj$fFxyzRBOj;Lq1&Pixmv9e(8PI>K! z-fcUksL$MJiP`WnI5am1wUy>H>d36MXV~?O84^rJM`f09rn7zXvdSCYbSnY3Ehp>L z=ccHei@Q>1?Z~oL)H+A+%hLvq_y|IqE*6>+Hz;8Y0mh7AaE@zZ7(swMgfh@7FM%2k z`yREtMOdr%N*GEuxAypr$Vb_}Bu~P~LOprv%VT!|*IYs6@O=8UC87QP z1$#dAI4pWvkT*IeX>7;Ht|UumUf~&)g@qNvVB}#fA1J#J#vF#>m;icC zI&IV^GaGzQkPxW()gpWUH-^=gl2QM{^l4~&P_>QijyBq>|{pPR0 zN)HHy@0)Iz*MGA*`8@S#-y7>ZWEjh}V-C9!9tw0g?2sjor#oN73 zlLgGe%A#OvlD8q?hMnYecdR=7w3aF0R?Tbm7mp$F%e_D?lDJlBP4pEHD+;!ej`K_Y za5S~5XU*zs$bK z!UEtn2TSEkJv2j1-ky(gJe2ghV)6-@l7yV;*V{v-dG;_>fhClZ@bDDx5B2c7=aEy`?MB^S7L@U8Bt2qNA1Na>Iv>V;}qslZvgMWuqe|))E$(4 znNnm|?*+H)^iYt}GxdVxb+oV9IJ18`Y}aIl_EjNhlHL;+g1d&fIvcDO_w&6#Y|Ur&E`?6Mjqt7t?;4$bP6m5$c(vUZZ% zT975z6y=uR{m`QM4=;wE&3o#h{w?Zk@@;1@Uv3qPCH1axBY_Nf8g&Ey3(YCvDqHgYWj&DxU4tO#28)-{n z02z)sC;e*AfR{#l0&Ngl?w}?JL*rAAsyXVB^VDzq4xj0v2gu{l_}U*Ukv!q4L zD{Ak5UMeabu4ETA33sJt#H$}W!^z-OcoIOU$#BIoYl7MM@I?5H*)|f~44PJ0EXm+P zck-whyE$gd1dGFDdU&Zd!D!l9WwGiF_gh48!kA{YxpRF}W8)Hi_BhghUdhr&m!%Hx zId#>wbQ8;bPCZTSU;5_xrEGF}TQE=VD9#Fx4CRJf#N&-|L2q&jxiKwEZ$8CjAgp%1 zjg6aY(~oYm2<3XG#hxe>w^{Y0=ESmbXW3ZINK2;6mS$6PQZr%?8GOmU_+n!kkhYtk z!Crh~lX-{XTp02$)P!NrVa}IshxQ!}`n(?sx6M07_Y_xts4n{QD|PNWa?RA%_-wJIM_IIV z!;5@ zkgs6zF)+_$Ac!khEAb}p-ui?T?%8N;N#j2 z>=o;T`z<(0?+rFeg3h`$_ltbu&*T$j%IO|M(b7yt|qlH|pEN$_UbSUde&)Bdug3%YJ^am272tLu_#g470_U#{Q{d zw31)m6;ch>TG04^V2QSPH{DxiggmP@wOUeba%WRip>eB*+wcR0_6t@GuvKcGi8?=$ z3z6sQN=u)wIaplHkot0 zX_X>)1C9|8*IL@pR#wtf2cMdxVu2^Y+}~{2B6xCfbJq+J5RP<3 zhRRtpsd2VXgJ$mhfZHvDrq9r)4`f5c#L*Ahgy(sU&}_*}zL#^LodFhu_YiJGo@_XB&GiO( z^6lKBerrb2@WRqIrRku>Y`10?*-W9bFNzE?W@{|**v!ybiaioPtT50XYw^^T#JX8P z{nNcmA6Ptgn8VTGa0b>~Il6NRZ*2oMBj}{87u3^LRntkm@h!zt(}c>z(lldiX`wOR z5@Q-4e}m+U2Tzz#X13IDoLO&)S?GgSJ#LsgFFr1zUnJ?%Zy!@LQLwR!bxUSK$1v{1 zQggCloh}hjs1odNoc{lXc~>162(FW$2wLOG1~`lb@r;WA6lNO7p+RRkMkAOIgUA%! zV=;Y_*?${=?zdnHeE~DHIef!0%yzySI{q5RDN}B-)hGnZ9?3UaP2d|o9^6Tqt?EaW zg~7Ju6rO$H#D-21^zZDw;DZFpp5Rt@QAKf}jch6|v&GLYDPsdaxUnNN0`^h8t*SBkrXZkt3GBh1rCSdp!Y}g$f_6Z0 zt;2@XSA57JThxp~16a>m#%T^#c%9NkuEC5Jxh>6W8q`*Yrn7L!B9hkwuY`5lV1|YX zyxvACt~UDv@qxDI)tSenjp6c9JHQM8nnB!51jhvsC^{6BRdl!rz$Uu%ACnU#$vm(= zqd@Up45YL3uTk%Wr5I_|{)~{v4ofktj*h^nUMi0sRk>guxH5Ugl?=Y564M;%Lh3}p zWp||2`cp%(R%?Pi0RoQ-R!w)()PyOE%{Jex-3#a0Y`$*maBBsT4Z@b2jrrr)fciZT; zYd|;W_Y{|1v~TR)r0UI%zIt~_W=fjdW;0n`Ig>o<%!!qIQe$EWiBY^Mqnk?ZtcxL^ z7|k|!Nsbd(rL=&FISTaMJ?UA#)D(|kn&nAN^QWhJ-FgN@D58@DeVJyr9%?QluoTvB zhIKREl$s=q0G(%8kLDYmNABrNjW@;%0`FdCIBUJ7c5rnYpH`k!0|ZdXnJEeNm1QO1 z1L0pq5wJ}qby@#sFwnjmHt$#Tf2YotCf!y~-}4p3#N3&> zpl<4>A3v>bfMqq$Y%L7Z-FClCy%tDy*@qi_siG<4MQP#JLG>t)de#JP8OiczX$zaX z@2JS%*+;7zX_u&s9jk=8SmR9hsG<@NwI~f6rm3fuhDgz@E|Cm-me#0@IOgOQT>~ot;JYDj2d-Qhvgm*? z&@OUCmVxX@+`;w0Jyf+Wmo`fZ+2|7WpeA^$<(bLxk3bv){6!G8%=f+Wx5R-~4S~^X zJ*-g}XsPfb$p~lYGh7*3H_KpzNA5^s^-4I)Ww5cTOT7CQPTP~?^j$J_(e?3H-d4`_&X=&~w;9|1m_&di-I!jAC`JcXY1s{f;;qFq}V4}Qn!Lc*=Uh0bs3mV%N zEND}@`d0S8TuvV9|1;g$KTkh4drx=ozLZ{YT|7GB+TKYQXZ5wlCr;Eqc6@KoISZ$C z%-y=+=Jv^x+uJ8kX=|R|J>r^Wgl0Xme(mGAaqc4`Om=;t9DKnhRoJT0|da5WR8 zHo)&n_~WTw0_o;oy%L`B-1EWHp?CN_A0YKw`2Bh8{2PX1=P~fuOSBSQ2{~J#D}{e+ zw0|X#hM{sTfihhRVN2oe9FScKDZo-I{TFX9hUd{`HWb4Ocz+e7lb1=C_BA}Go3!w} zW{rijxe#Uzq;5G#8z_rjc%uidmuU5ee<#lk{!Po_d?m!h!;Xa-*bc`|t)}oYABwsC zq!^0e*)P=(UPpZYR?7Id@UPn+feMWf439v_@Fg*!rcb#jTY@3pEbcV9)1!W+u$h=!{eR< zslwdh`7}p&D&!2$*A?)Or;*=X0A;%jTH6A+kF{znobmkp_w&^UrH8Q&yt_gxiynx5 zF8uXCth`Ak>@Ud&g&`1lI3tc zfCp>g-YV_ORzVp49oS-dim+u5)rz0ix1Y9$6_8?Hvdci~3aA@AZM|^h-#8CiLMPO) zc0hC3h0OmuF&DlP^t^Tv^}|kuo0Kp>Rs#%)J9V&h6pZsE7;Z>`rI=pbR^3goaN`14 zWTl4@FC%yaGLsl$flVZQk#HPr1Ga-73b$svA{3p{(9nv5Z1Nf#MM#*+zTBAG-clPTcXcpB*@ z)5#1nlguJLWHy;Y=8}12K6t%aNEVS^vKW{`my%`RcX>H%>RU-xk=0}kIR0A;?$=Hy zXOMN|OmY@kPtGRikPT!b*#x|z=aTct`Q!q!g={4kl5N0pvYlK^E+M}omy+L;%gE(q z2iXapwXPsn0te}Ca6z?)TtluU*OBYVUb2tuhcy>BkUs!_^-bhvatjQ?-$rgHcaS^D zUF2?Z54jgSE!+>>r3cA_z;OLA_+NUIJVqWTPr%^JQ_$mkhCB;yo1Q0!$P2Lj`X%s$ z_6lq-e3iUL{zP6UhsmGG8{|#$7xGu~7Wo@_o4iBbCGU~<$p_>^@)7x%d_q1YpOMeW z-^mx`2>B8Q@V4^O$+zSv`Hp-~ejq=RW8^sL1BZwzIYD(W?hn2jfuaE>=z!71 zM9nmYTBw!AQX7o}h8H_cfW<*h>Y|C%O_N};QVR7@FRW}zqv_N~GiWBwqS-Wu`hg23 zkLJ??T1bm%G5Go~r2!hGAu3WJ8KVjf1D|&}t)RnbB^^$yXf>^&wX}}b(+1i|n`ko~ zL0f1mZKLh912|nf=_pv5G=`3)U345BPbbicbP_PVPN7rjG}=w4(;0Looke?KmD3#H zf1O9?(*<-PT||57V!DJbrOW6kzzVy9uB5BzYPyD=O4riU=;`zfx(>Kv&!X$;+4LN` zfo`Om=w^B@J&&GGFQ8lKR(c`bMlYh<>BaOC`a60l{XN(ym(v||C*4J_pjXnX=x%y7 z-9xXT*V60g^>i=YNB7eM^alC|dLzAw-b`Lgw^e^$){fd4~|3SZ@ z|D@m2qx3uaJ^g|HNRQFuw2$^vm7V}-Ai$2nm;mb=46vZX#LO&)S%3y5mf2Vwi)VJ0 zz#PoUTr82fVYyl|OJN@7WvMKU0c#L+6EayA%Vs&u&vIEF%V!0wkQK3FR>DeIfCX8I ziA-WLQ&^alv2s?yhOtUu<*j1X3|70bI#$mbSR-p<&1?i~VXdr7XQ zvTf`lww+zfE@8i8m$Kip%h=^?2iwVZu`Ae>>?*dKUCs8eYuL5yI(9wV%l5JT>;Sug z{ej)cZelmHTiC7aHg-F^gWbvQVt2E9*uCsNc0YT79b^x(huFjH5pbdR7<-&O!JcGK zv8UNH>{<34d!8L)FR&NcOYCL#3i~5_mA%IP#9n8I*`L`P>`nF;_E+{6`x|?ky~EyR z@3HsU2kb-k5&M{Z!ail6vCrAx*%#~x`;vXdzGnYm->`qOZ`o1y9s8dBznqWY(gARqS%E5!67&W7}^rtLXwayqzE3t zE2Ii(Lb~7+GK5SaOUM>-1iz3gJV( zVM3)aT&NPNg&Lt&s1xdi2BA@C5}Jh(LW|HUvxT<$OF_wQTkhd{(YRj^W6$ zEONv+g0%z3TJ63V95~kDbt66(8zc7{2kzJ7^QJ)Lh-s1PLxvH-GGM9F4suX?D%W8u z;*FyFI_Sl11J%} z;fSDcB*Jq1aHTd9aV>tbQX5HFZ6thMBtdnN1l3CzZ@nBjM(#I6?ng3QX^JGUDI%vS z622)Ce+lIFjLEITBuugp(ubk|W`j$o=xj{YXU(mq(sQ@-bY8`5Lanbe2ct zmy7sbUF3V~0+`;q0KQ)r#P6&N;&;|X3ce20AqGS(UN~yyB8E^d9LufH$O3_oI6V|- z6!Ag~L|+7>FGA55C%q#@Kary^l<13a^hH_pMY(ptt4C8~QM&$I9vbj~iD<+mHDPj_FiB0Aq$W&K6DFw%lhlOrnowR7%4lokX9Nhm0i$udfiQG$#TWRxJI z1Q{jBC_zREGD?u~i)0ieqaYat$tXy{FH-P}6qKi+JO$;!0t=o%1?4FyPeFMK%2QCD zg7OrUr=UCqQc%kfDX1k^LST^)93{jTDX7Vlf*4sa5}B4E5`vBt(mqTIVbme* z!?BSA=XKVbrNEogjgpb)=7wU5@MZ%SSKOKNr-L|qML;1CLy{>h;9<1n}p~lA-YM3 zZjyvwgdis&$Vn1@5rUi~Yjr|GtdkJyB*Z!iu}(s)lMw49#5xJFPC~4c5bGqwItj5( zLadVz>mtdkJyU@l85Bg8sb$&FW7MhJ2ef}DgPCn3m52x}6;nuM?hTbQ)h z5y4A%aO^EcY;$dl+*mj1>&gO+s{&5Zxq1Hwn>ALUfZ5-6TXe z3DHeLbdwO>Bt$o<4CNuZNr-L|q8rfnX{knZlMvk`L^r7%zYh&1sT{wr9KWv|D|9(l z=yI*WO66D!%28f9eqT9$UpamsBBq3hDIsD?h?o*0ri6$oA!161m=YqUgor62VoHdZ z5+bIAP$?l)N(hw_LZyUIDIru!2$d2-rG(%pAvj71juL{Sgy1M4I7$eP5`v?I;3y$D z@~$6m{RoZ{f}>QASb!KQ)nnEnR7&-jb%>Z!J!V}!W?emIT|H(U+PhK%7F7d6LIcWc zzzW@fMb&`v8nCDuP+kKTRRhXvz@lnEc?~G90p&HIyap^P1aS#LTtX0+5X7ZMl!sU@ zA(l&sWz+~VY6KZIf{do79Mm!rc*uB($*3P>G(2Tg6LL_?K^e7$jM_p*Z6Tw! zkWpL6s4ZmF7BXrJ8BJ6fwS|n@LPl*NqqdMyTga#_WYiWiY6}^)g^b!l4rwJOqtcL3 zO~|MwWKjD7GR3idP< zEMf(F8VdF_6zpjz*tbxy_!TUE1&d$7;#aWT6)b55OIpE_R4A5uuv5& zR0T^@!O~Q)G!-mO1xr)G(p0eg6f8dl%TK}bQ&8J0Sa1p!9IR^5z7`8k!4gxjs1z(J z1&d0-qEfJ^6f7zQi%LO_u3+y)LB+114p&fzE2zU2)Zs9Uq=`ix4ueQ|r9lD=A>ow< z2@2|P1+|lc`ba^2q@X@hP!TDpffUp^3Thbz)r*4aMM3qVpk7f>p(vgDjQ0Msp>O4OH(((gjD?dQW_yMw&A0S)#0kV}Jv}_eoBZ#O9 z#0aQj;w4PAgciK^I;a;kf+e(mB^)n+!8gnTY%-`!6;z)JDn$jAqJm0ML8YjmHdIg> zDyR(=l&7FlR8Z9^sN)pWaSG}<1$CT)I!?rvwpHjj;rHpp6j4ek-n@>iYPez+hMw?GT=?a=Z3YtF(+C2)|Jqp?=3fd?N z+9(RzC<@vrBI*Yb;X*`!5K$F~h!e1CO8Y)+uwult#X%AYp-V#Ok`THigf0o8OG4<9 zaF9g8K@tgpOhV}rqLhRqAQC157GQF*685Jh>`zPBpO#U&jM8P4E~8i(#mXpFMzJ!A zl~JsWf@HLsWwe@Qw3=nKnq{<_WlRJxT=KNYI6|OcoC?ON;5R7PHBrz~RnSCL(Bf3k zxKttopHOl*5FCsQ6bB;%$&mqB1(TN8i=~7U;55A`Aw4~dmqC0u=T9JRgYH@_1cAE*D0eI{pYMm2MisCIVNv80-?@<@Gk9a!{FJ*!Lv<+XPXDl&K*2EZ}9B=!Lthn&$bMnZPm`e5j4oN zK{|QrF>PnUx2M4Ns}k6{SqJMD#sh22LRi@VA?ZKit(&y7FX8Mu?d%_LcDr_V1kP^L z&c24TJ=)n bool: + """True iff `expiresAt` lies in the past (UTC).""" + if self.expiresAt is None: + return False + return float(self.expiresAt) < time.time() + + @computed_field( # type: ignore[prop-decorator] + json_schema_extra={ + "label": "Verbraucht", + "frontend_type": "checkbox", + "frontend_readonly": True, + "frontend_required": False, + "frontend_format_labels": ["Ja", "-", "Nein"], + }, + ) + @property + def usedUpFlag(self) -> bool: + """True iff `currentUses >= maxUses`.""" + return (self.currentUses or 0) >= (self.maxUses or 1) diff --git a/modules/datamodels/datamodelViews.py b/modules/datamodels/datamodelViews.py index aca32f56..7a327fd8 100644 --- a/modules/datamodels/datamodelViews.py +++ b/modules/datamodels/datamodelViews.py @@ -23,6 +23,7 @@ from modules.datamodels.datamodelMembership import UserMandate, FeatureAccess from modules.datamodels.datamodelBilling import BillingTransaction from modules.datamodels.datamodelSubscription import MandateSubscription from modules.datamodels.datamodelUiLanguage import UiLanguageSet +from modules.datamodels.datamodelRbac import Role from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes from modules.shared.i18nRegistry import i18nModel @@ -197,3 +198,114 @@ class DataNeutralizerAttributesView(DataNeutralizerAttributes): # Manual registration for non-PowerOnModel view MODEL_REGISTRY["DataNeutralizerAttributesView"] = DataNeutralizerAttributesView # type: ignore[assignment] + + +# ============================================================================ +# Role view — admin RBAC list with computed `scopeType` + `userCount` +# +# `scopeType` is computed in the route from (mandateId, isSystemRole). Exposed +# here as a pure `select` field so the frontend renders the user-facing label +# from `frontend_options` (no hardcoded mapping in the page). +# ============================================================================ + +@i18nModel("Rolle (Ansicht)") +class RoleView(Role): + """Role extended with computed scope information for the admin UI.""" + + scopeType: Optional[str] = Field( + default=None, + description="Computed scope: 'system' (template), 'global', or 'mandate'.", + json_schema_extra={ + "label": "Geltungsbereich", + "frontend_type": "select", + "frontend_readonly": True, + "frontend_required": False, + "frontend_options": [ + {"value": "system", "label": "System-Template"}, + {"value": "global", "label": "Template"}, + {"value": "mandate", "label": "Mandant"}, + ], + }, + ) + userCount: Optional[int] = Field( + default=None, + description="Number of users assigned to this role (via UserMandateRole).", + json_schema_extra={ + "label": "Benutzer", + "frontend_type": "number", + "frontend_readonly": True, + "frontend_required": False, + }, + ) + + +# ============================================================================ +# Automation Workflow — dashboard view with synthesized fields +# ============================================================================ + +from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow + + +@i18nModel("Workflow (Ansicht)") +class Automation2WorkflowView(AutoWorkflow): + """AutoWorkflow extended with computed dashboard fields. + + Used exclusively for /api/attributes/ so the frontend can resolve column + types for the workflow dashboard table (FormGeneratorTable). + """ + + sysCreatedAt: Optional[float] = Field( + default=None, + description="Record creation timestamp (UTC)", + json_schema_extra={ + "label": "Erstellt", + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + }, + ) + lastStartedAt: Optional[float] = Field( + default=None, + description="Timestamp of the most recent workflow run start", + json_schema_extra={ + "label": "Zuletzt gestartet", + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + }, + ) + runCount: Optional[int] = Field( + default=None, + description="Total number of runs for this workflow", + json_schema_extra={ + "label": "Laeufe", + "frontend_type": "number", + "frontend_readonly": True, + "frontend_required": False, + }, + ) + mandateLabel: Optional[str] = Field( + default=None, + description="Mandate name (resolved from mandateId)", + json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True}, + ) + instanceLabel: Optional[str] = Field( + default=None, + description="Feature instance label (resolved from featureInstanceId)", + json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True}, + ) + featureCode: Optional[str] = Field( + default=None, + description="Feature code of the owning instance", + json_schema_extra={"label": "Feature", "frontend_type": "text", "frontend_readonly": True}, + ) + isRunning: Optional[bool] = Field( + default=None, + description="Whether the workflow currently has an active run", + json_schema_extra={ + "label": "Läuft", + "frontend_type": "checkbox", + "frontend_readonly": True, + "frontend_format_labels": ["Ja", "-", "Nein"], + }, + ) diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py index 05473fc7..5ebf629e 100644 --- a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py @@ -98,7 +98,12 @@ class AutoWorkflow(PowerOnModel): isTemplate: bool = Field( default=False, description="Whether this workflow is a template", - json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Ist Vorlage"}, + json_schema_extra={ + "frontend_type": "checkbox", + "frontend_required": False, + "label": "Ist Vorlage", + "frontend_format_labels": ["Ja", "-", "Nein"], + }, ) templateSourceId: Optional[str] = Field( default=None, @@ -108,18 +113,43 @@ class AutoWorkflow(PowerOnModel): "frontend_readonly": True, "frontend_required": False, "label": "Vorlagen-Quelle", - "fk_target": {"db": "poweron_graphicaleditor", "table": "AutoWorkflow", "labelField": "label"}, + # Soft FK: holds either a real AutoWorkflow.id (UUID, when copied + # from a stored template) OR an in-code sentinel like + # "trustee-receipt-import" (when bootstrapped from + # featureModule.getTemplateWorkflows()). Sentinel values do not + # exist as DB rows by design — orphan cleanup MUST skip this column. + "fk_target": { + "db": "poweron_graphicaleditor", + "table": "AutoWorkflow", + "labelField": "label", + "softFk": True, + }, }, ) templateScope: Optional[str] = Field( default=None, description="Template scope: user, instance, mandate, system (AutoTemplateScope)", - json_schema_extra={"frontend_type": "select", "frontend_required": False, "label": "Vorlagen-Bereich"}, + json_schema_extra={ + "frontend_type": "select", + "frontend_required": False, + "label": "Vorlagen-Bereich", + "frontend_options": [ + {"value": "user", "label": "Meine"}, + {"value": "instance", "label": "Instanz"}, + {"value": "mandate", "label": "Mandant"}, + {"value": "system", "label": "System"}, + ], + }, ) sharedReadOnly: bool = Field( default=False, description="If true, shared template is read-only for non-owners", - json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Freigabe nur-lesen"}, + json_schema_extra={ + "frontend_type": "checkbox", + "frontend_required": False, + "label": "Freigabe nur-lesen", + "frontend_format_labels": ["Ja", "-", "Nein"], + }, ) currentVersionId: Optional[str] = Field( default=None, @@ -135,7 +165,12 @@ class AutoWorkflow(PowerOnModel): active: bool = Field( default=True, description="Whether workflow is active", - json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Aktiv"}, + json_schema_extra={ + "frontend_type": "checkbox", + "frontend_required": False, + "label": "Aktiv", + "frontend_format_labels": ["Ja", "-", "Nein"], + }, ) eventId: Optional[str] = Field( default=None, @@ -145,7 +180,12 @@ class AutoWorkflow(PowerOnModel): notifyOnFailure: bool = Field( default=True, description="Send notification (in-app + email) when a run fails", - json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Bei Fehler benachrichtigen"}, + json_schema_extra={ + "frontend_type": "checkbox", + "frontend_required": False, + "label": "Bei Fehler benachrichtigen", + "frontend_format_labels": ["Ja", "-", "Nein"], + }, ) # Legacy fields kept for backward compatibility during transition graph: Dict[str, Any] = Field( @@ -189,7 +229,16 @@ class AutoVersion(PowerOnModel): status: str = Field( default=AutoWorkflowStatus.DRAFT.value, description="Version status: draft, published, archived", - json_schema_extra={"frontend_type": "select", "frontend_required": False, "label": "Status"}, + json_schema_extra={ + "frontend_type": "select", + "frontend_required": False, + "label": "Status", + "frontend_options": [ + {"value": "draft", "label": "Entwurf"}, + {"value": "published", "label": "Veröffentlicht"}, + {"value": "archived", "label": "Archiviert"}, + ], + }, ) graph: Dict[str, Any] = Field( default_factory=dict, @@ -281,7 +330,18 @@ class AutoRun(PowerOnModel): status: str = Field( default=AutoRunStatus.RUNNING.value, description="Status: running, paused, completed, failed, cancelled", - json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"}, + json_schema_extra={ + "frontend_type": "select", + "frontend_required": False, + "label": "Status", + "frontend_options": [ + {"value": "running", "label": "Läuft"}, + {"value": "paused", "label": "Pausiert"}, + {"value": "completed", "label": "Abgeschlossen"}, + {"value": "failed", "label": "Fehlgeschlagen"}, + {"value": "cancelled", "label": "Abgebrochen"}, + ], + }, ) trigger: Dict[str, Any] = Field( default_factory=dict, @@ -362,7 +422,18 @@ class AutoStepLog(PowerOnModel): status: str = Field( default=AutoStepStatus.PENDING.value, description="Step status: pending, running, completed, failed, skipped", - json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"}, + json_schema_extra={ + "frontend_type": "select", + "frontend_required": False, + "label": "Status", + "frontend_options": [ + {"value": "pending", "label": "Wartend"}, + {"value": "running", "label": "Läuft"}, + {"value": "completed", "label": "Abgeschlossen"}, + {"value": "failed", "label": "Fehlgeschlagen"}, + {"value": "skipped", "label": "Übersprungen"}, + ], + }, ) inputSnapshot: Dict[str, Any] = Field( default_factory=dict, @@ -464,7 +535,17 @@ class AutoTask(PowerOnModel): status: str = Field( default=AutoTaskStatus.PENDING.value, description="Status: pending, completed, cancelled, expired", - json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"}, + json_schema_extra={ + "frontend_type": "select", + "frontend_required": False, + "label": "Status", + "frontend_options": [ + {"value": "pending", "label": "Wartend"}, + {"value": "completed", "label": "Abgeschlossen"}, + {"value": "cancelled", "label": "Abgebrochen"}, + {"value": "expired", "label": "Abgelaufen"}, + ], + }, ) result: Optional[Dict[str, Any]] = Field( default=None, diff --git a/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py index 8cdb18c6..c84db9d3 100644 --- a/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py @@ -7,6 +7,7 @@ Uses PostgreSQL poweron_graphicaleditor database (Greenfield). import base64 import logging +import time import uuid from typing import Dict, Any, List, Optional @@ -278,6 +279,7 @@ class GraphicalEditorObjects: "workflowId": workflowId, "label": label, "status": "running", + "startedAt": time.time(), "nodeOutputs": _make_json_serializable(nodeOutputs or {}), "currentNodeId": None, "context": ctx, @@ -314,6 +316,8 @@ class GraphicalEditorObjects: updates = {} if status is not None: updates["status"] = status + if status in ("completed", "failed", "stopped", "cancelled") and not run.get("completedAt"): + updates["completedAt"] = time.time() if nodeOutputs is not None: updates["nodeOutputs"] = _make_json_serializable(nodeOutputs) if currentNodeId is not None: diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py index dc136395..0dccfb36 100644 --- a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py @@ -526,9 +526,17 @@ def get_templates( instanceId: str = Path(..., description="Feature instance ID"), scope: Optional[str] = Query(None, description="Filter by scope: user, instance, mandate, system"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"), + mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), + column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), context: RequestContext = Depends(getRequestContext), ): - """List workflow templates with optional pagination.""" + """List workflow templates with optional pagination. + + Supports the FormGeneratorTable backend pattern: + - default: paginated/filtered/sorted ``{items, pagination}`` response + - ``mode=filterValues&column=X``: distinct values for column X (cross-filtered) + - ``mode=ids``: all IDs matching current filters + """ mandateId = _validateInstanceAccess(instanceId, context) iface = getGraphicalEditorInterface(context.user, mandateId, instanceId) templates = iface.getTemplates(scope=scope) @@ -537,6 +545,16 @@ def get_templates( from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow enrichRowsWithFkLabels(templates, AutoWorkflow) + if mode == "filterValues": + if not column: + raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") + from modules.routes.routeHelpers import handleFilterValuesInMemory + return handleFilterValuesInMemory(templates, column, pagination) + + if mode == "ids": + from modules.routes.routeHelpers import handleIdsInMemory + return handleIdsInMemory(templates, pagination) + paginationParams = None if pagination: try: @@ -1242,9 +1260,17 @@ def get_workflows( instanceId: str = Path(..., description="Feature instance ID"), active: Optional[bool] = Query(None, description="Filter by active: true|false"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"), + mode: Optional[str] = Query(None, description="'filterValues' for distinct column values, 'ids' for all filtered IDs"), + column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"), context: RequestContext = Depends(getRequestContext), ): - """List all workflows for this feature instance.""" + """List all workflows for this feature instance. + + Supports the FormGeneratorTable backend pattern: + - default: paginated/filtered/sorted ``{items, pagination}`` response + - ``mode=filterValues&column=X``: distinct values for column X (cross-filtered) + - ``mode=ids``: all IDs matching current filters (for "select all") + """ mandateId = _validateInstanceAccess(instanceId, context) iface = getGraphicalEditorInterface(context.user, mandateId, instanceId) items = iface.getWorkflows(active=active) @@ -1272,10 +1298,19 @@ def get_workflows( "runStatus": active_run.get("status") if active_run else None, "stuckAtNodeId": stuck_at_node_id, "stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "", - "createdAt": wf.get("sysCreatedAt"), "lastStartedAt": last_started_at, }) + if mode == "filterValues": + if not column: + raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") + from modules.routes.routeHelpers import handleFilterValuesInMemory + return handleFilterValuesInMemory(enriched, column, pagination) + + if mode == "ids": + from modules.routes.routeHelpers import handleIdsInMemory + return handleIdsInMemory(enriched, pagination) + paginationParams = None if pagination: try: diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py index 70e02c45..ad85105e 100644 --- a/modules/features/trustee/datamodelFeatureTrustee.py +++ b/modules/features/trustee/datamodelFeatureTrustee.py @@ -837,6 +837,43 @@ class TrusteeAccountingConfig(PowerOnModel): chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt", "frontend_type": "timestamp"}) mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"}}) +@i18nModel("Position (Ansicht)") +class TrusteePositionView(TrusteePosition): + """``TrusteePosition`` extended with computed display fields for the table. + + The route enriches each row with the latest accounting-sync state so the + frontend can render `syncStatus` (select) + `syncErrorMessage` (text) via + `resolveColumnTypes` instead of hardcoded label maps in the page. + """ + + syncStatus: Optional[str] = Field( + default=None, + description="Latest accounting-sync status for this position.", + json_schema_extra={ + "label": "Synchronisierungsstatus", + "frontend_type": "select", + "frontend_readonly": True, + "frontend_required": False, + "frontend_options": [ + {"value": "pending", "label": "Ausstehend"}, + {"value": "synced", "label": "Synchronisiert"}, + {"value": "error", "label": "Fehler"}, + {"value": "cancelled", "label": "Abgebrochen"}, + ], + }, + ) + syncErrorMessage: Optional[str] = Field( + default=None, + description="Latest accounting-sync error message (if syncStatus == 'error').", + json_schema_extra={ + "label": "Fehlermeldung", + "frontend_type": "text", + "frontend_readonly": True, + "frontend_required": False, + }, + ) + + @i18nModel("Buchhaltungs-Synchronisation") class TrusteeAccountingSync(PowerOnModel): """Tracks which position was synced to which external system and when. diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py index ebef127c..2c9c3328 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -30,6 +30,7 @@ from .datamodelFeatureTrustee import ( TrusteeContract, TrusteeDocument, TrusteePosition, + TrusteePositionView, TrusteeDataAccount, TrusteeDataJournalEntry, TrusteeDataJournalLine, @@ -209,6 +210,7 @@ _TRUSTEE_ENTITY_MODELS = { "TrusteeContract": TrusteeContract, "TrusteeDocument": TrusteeDocument, "TrusteePosition": TrusteePosition, + "TrusteePositionView": TrusteePositionView, # Read-only sync tables (TrusteeData*) and accounting bookkeeping "TrusteeDataAccount": TrusteeDataAccount, "TrusteeDataJournalEntry": TrusteeDataJournalEntry, @@ -979,29 +981,16 @@ def get_documents( def _handleDocumentMode(instanceId, mandateId, mode, column, pagination, context): """Handle mode=filterValues and mode=ids for trustee documents.""" - from modules.routes.routeHelpers import handleIdsInMemory + from modules.routes.routeHelpers import handleIdsInMemory, handleFilterValuesInMemory, enrichRowsWithFkLabels + interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC - from modules.routes.routeHelpers import parseCrossFilterPagination - from fastapi.responses import JSONResponse - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - crossFilterPagination = parseCrossFilterPagination(column, pagination) - values = getDistinctColumnValuesWithRBAC( - connector=interface.db, - modelClass=TrusteeDocument, - column=column, - currentUser=interface.currentUser, - pagination=crossFilterPagination, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) + result = interface.getAllDocuments(None) + items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] + enrichRowsWithFkLabels(items, TrusteeDocument) + return handleFilterValuesInMemory(items, column, pagination) if mode == "ids": - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllDocuments(None) items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] return handleIdsInMemory(items, pagination) @@ -1181,6 +1170,51 @@ def delete_document( # ===== Position Routes ===== +def _buildSyncStatusByPosition(interface, instanceId: str) -> Dict[str, Dict[str, Optional[str]]]: + """Build a positionId -> {syncStatus, syncErrorMessage} map from + `TrusteeAccountingSync` records for the given feature instance. + + Preference order matches the historic UI logic: ``synced`` overrides + ``error``, so a successful retry hides an old failure. Any other status + (`pending`, `cancelled`, ...) is kept verbatim. + """ + from .datamodelFeatureTrustee import TrusteeAccountingSync + + syncRecords = interface.db.getRecordset( + TrusteeAccountingSync, recordFilter={"featureInstanceId": instanceId} + ) or [] + + syncMap: Dict[str, Dict[str, Optional[str]]] = {} + for rec in syncRecords: + positionId = rec.get("positionId") + if not positionId: + continue + status = rec.get("syncStatus") + errorMessage = rec.get("errorMessage") + current = syncMap.get(positionId) + prefer = ( + current is None + or status == "synced" + or (current.get("syncStatus") != "synced" and status == "error") + ) + if prefer: + syncMap[positionId] = { + "syncStatus": status, + "syncErrorMessage": errorMessage, + } + return syncMap + + +def _enrichPositionsWithSyncStatus(items: List[Dict[str, Any]], interface, instanceId: str) -> List[Dict[str, Any]]: + """In-place enrich each position dict with `syncStatus` + `syncErrorMessage`.""" + syncMap = _buildSyncStatusByPosition(interface, instanceId) + for row in items: + info = syncMap.get(row.get("id")) or {} + row["syncStatus"] = info.get("syncStatus") + row["syncErrorMessage"] = info.get("syncErrorMessage") + return items + + @router.get("/{instanceId}/positions") @limiter.limit("30/minute") def get_positions( @@ -1205,8 +1239,10 @@ def get_positions( return [r.model_dump() if hasattr(r, 'model_dump') else r for r in items] if paginationParams and hasattr(result, 'items'): + items = _itemsToDicts(result.items) + _enrichPositionsWithSyncStatus(items, interface, instanceId) return { - "items": _itemsToDicts(result.items), + "items": items, "pagination": PaginationMetadata( currentPage=paginationParams.page or 1, pageSize=paginationParams.pageSize or 20, @@ -1216,37 +1252,30 @@ def get_positions( filters=paginationParams.filters if paginationParams else None ).model_dump(), } - items = result if isinstance(result, list) else result.items - return {"items": _itemsToDicts(items), "pagination": None} + rawItems = result if isinstance(result, list) else result.items + items = _itemsToDicts(rawItems) + _enrichPositionsWithSyncStatus(items, interface, instanceId) + return {"items": items, "pagination": None} def _handlePositionMode(instanceId, mandateId, mode, column, pagination, context): """Handle mode=filterValues and mode=ids for trustee positions.""" - from modules.routes.routeHelpers import handleIdsInMemory + from modules.routes.routeHelpers import handleIdsInMemory, handleFilterValuesInMemory, enrichRowsWithFkLabels + from .datamodelFeatureTrustee import TrusteePositionView + interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - from modules.interfaces.interfaceRbac import getDistinctColumnValuesWithRBAC - from modules.routes.routeHelpers import parseCrossFilterPagination - from fastapi.responses import JSONResponse - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) - crossFilterPagination = parseCrossFilterPagination(column, pagination) - values = getDistinctColumnValuesWithRBAC( - connector=interface.db, - modelClass=TrusteePosition, - column=column, - currentUser=interface.currentUser, - pagination=crossFilterPagination, - recordFilter=None, - mandateId=interface.mandateId, - featureInstanceId=interface.featureInstanceId, - featureCode=interface.FEATURE_CODE - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) - if mode == "ids": - interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) result = interface.getAllPositions(None) items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] + _enrichPositionsWithSyncStatus(items, interface, instanceId) + # Use the view model so FK labels for the synthetic columns also resolve. + enrichRowsWithFkLabels(items, TrusteePositionView) + return handleFilterValuesInMemory(items, column, pagination) + if mode == "ids": + result = interface.getAllPositions(None) + items = [r.model_dump() if hasattr(r, 'model_dump') else r for r in (result.items if hasattr(result, 'items') else result)] + _enrichPositionsWithSyncStatus(items, interface, instanceId) return handleIdsInMemory(items, pagination) @@ -2402,14 +2431,12 @@ def _paginatedReadEndpoint( """ from modules.interfaces.interfaceRbac import ( getRecordsetPaginatedWithRBAC, - getDistinctColumnValuesWithRBAC, ) from modules.routes.routeHelpers import ( handleIdsInMemory, - parseCrossFilterPagination, + handleFilterValuesInMemory, enrichRowsWithFkLabels, ) - from fastapi.responses import JSONResponse mandateId = _validateInstanceAccess(instanceId, context) interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId) @@ -2417,19 +2444,21 @@ def _paginatedReadEndpoint( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - crossFilterPagination = parseCrossFilterPagination(column, pagination) - values = getDistinctColumnValuesWithRBAC( + result = getRecordsetPaginatedWithRBAC( connector=interface.db, modelClass=modelClass, - column=column, currentUser=interface.currentUser, - pagination=crossFilterPagination, + pagination=None, recordFilter=None, mandateId=interface.mandateId, featureInstanceId=interface.featureInstanceId, featureCode=interface.FEATURE_CODE, ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) + rawItems = result.items if hasattr(result, "items") else result + items = [r.model_dump() if hasattr(r, "model_dump") else r for r in rawItems] + featureResolvers = _buildFeatureInternalResolvers(modelClass, interface.db) + enrichRowsWithFkLabels(items, modelClass, extraResolvers=featureResolvers or None) + return handleFilterValuesInMemory(items, column, pagination) if mode == "ids": result = getRecordsetPaginatedWithRBAC( diff --git a/modules/routes/routeAdminDatabaseHealth.py b/modules/routes/routeAdminDatabaseHealth.py index cddc3d73..44b9f0c1 100644 --- a/modules/routes/routeAdminDatabaseHealth.py +++ b/modules/routes/routeAdminDatabaseHealth.py @@ -18,6 +18,7 @@ from modules.system.databaseHealth import ( _cleanAllOrphans, _cleanOrphans, _getTableStats, + _isUserIdFk, _listOrphans, _scanOrphans, ) @@ -49,6 +50,14 @@ class OrphanCleanAllRequest(BaseModel): False, description="Override safety guards on every relationship. Use with extreme care.", ) + excludeUserFks: bool = Field( + False, + description=( + "Skip FK relationships pointing at UserInDB.id. Deleted-user remnants " + "(audit / billing / membership rows) are handled by a dedicated purge " + "workflow and should not be touched by generic FK cleanup." + ), + ) @router.get("/stats") @@ -68,10 +77,18 @@ def getDatabaseTableStats( def getDatabaseOrphans( request: Request, db: Optional[str] = None, + excludeUserFks: bool = False, currentUser: User = Depends(requireSysAdmin), ) -> Dict[str, Any]: - """FK orphan scan (optional filter by source database name).""" + """FK orphan scan (optional filter by source database name). + + When ``excludeUserFks=true``, results targeting ``UserInDB.id`` are + omitted from the response so the SysAdmin UI can keep deleted-user + remnants visually separate from real FK drift. + """ rows = _scanOrphans(dbFilter=db) + if excludeUserFks: + rows = [r for r in rows if not _isUserIdFk(r.get("targetTable", ""), r.get("targetColumn", ""))] return {"orphans": rows} @@ -161,17 +178,19 @@ def postDatabaseOrphansCleanAll( `skipped` (safety guard triggered, no force), or `error` (other failure). """ force = bool(body.force) if body is not None else False - results: List[dict] = _cleanAllOrphans(force=force) + excludeUserFks = bool(body.excludeUserFks) if body is not None else False + results: List[dict] = _cleanAllOrphans(force=force, excludeUserFks=excludeUserFks) skipped = sum(1 for r in results if "skipped" in r) errored = sum(1 for r in results if "error" in r) deletedTotal = sum(int(r.get("deleted", 0)) for r in results) logger.info( - "SysAdmin orphan clean-all: user=%s batches=%s deleted=%s skipped=%s errored=%s force=%s", + "SysAdmin orphan clean-all: user=%s batches=%s deleted=%s skipped=%s errored=%s force=%s excludeUserFks=%s", currentUser.username, len(results), deletedTotal, skipped, errored, force, + excludeUserFks, ) return {"results": results, "skipped": skipped, "errored": errored, "deleted": deletedTotal} diff --git a/modules/routes/routeAudit.py b/modules/routes/routeAudit.py index ed275a88..d7d58728 100644 --- a/modules/routes/routeAudit.py +++ b/modules/routes/routeAudit.py @@ -109,9 +109,8 @@ def _enrichUserAndInstanceLabels( ) -> None: """Resolve userId -> username and featureInstanceId -> label in-place. - Uses the central resolvers from routeHelpers. Returns None (not the raw ID) - for unresolvable entries so the frontend can distinguish "resolved" from - "missing". + Uses the central resolvers from routeHelpers. Falls back to ``NA()`` + for unresolvable entries so filter dropdowns still show an entry. """ from modules.routes.routeHelpers import resolveUserLabels, resolveInstanceLabels @@ -129,10 +128,10 @@ def _enrichUserAndInstanceLabels( for r in items: uid = r.get(userKey) if uid and not r.get(usernameKey) and uid in userMap: - r[usernameKey] = userMap[uid] + r[usernameKey] = userMap.get(uid) or f"NA({uid})" iid = r.get(instanceKey) if iid: - r[instanceLabelKey] = instanceMap.get(iid) + r[instanceLabelKey] = instanceMap.get(iid) or f"NA({iid})" def _requireAuditAccess(context: RequestContext): diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index b6d6f8e0..90431ba2 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -260,7 +260,7 @@ def get_files( from modules.routes.routeHelpers import ( handleIdsMode, - parseCrossFilterPagination, + handleFilterValuesInMemory, ) managementInterface = interfaceDbManagement.getInterface( @@ -272,13 +272,11 @@ def get_files( if mode == "filterValues": if not column: raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") - crossPagination = parseCrossFilterPagination(column, pagination) - recordFilter = {"sysCreatedBy": managementInterface.userId} - from fastapi.responses import JSONResponse - values = managementInterface.db.getDistinctColumnValues( - FileItem, column, crossPagination, recordFilter - ) - return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) + allFiles = managementInterface.getAllFiles() + items = allFiles if isinstance(allFiles, list) else (allFiles.items if hasattr(allFiles, "items") else []) + itemDicts = [f.model_dump() if hasattr(f, "model_dump") else (dict(f) if not isinstance(f, dict) else f) for f in items] + enrichRowsWithFkLabels(itemDicts, FileItem) + return handleFilterValuesInMemory(itemDicts, column, pagination) if mode == "ids": recordFilter = {"sysCreatedBy": managementInterface.userId} diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index 4f4f42c3..7651afe0 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -94,7 +94,8 @@ class InvitationResponse(BaseModel): maxUses: int currentUses: int inviteUrl: str # Full URL for the invitation - emailSent: bool = False # Whether invitation email was sent + emailSentFlag: bool = False # Whether invitation email was sent + emailSentAt: Optional[float] = None # Timestamp when invitation email was sent (UTC) class InvitationValidation(BaseModel): @@ -236,7 +237,8 @@ def create_invitation( maxUses=data.maxUses, currentUses=0, inviteUrl="", - emailSent=False + emailSentFlag=False, + emailSentAt=None, ) else: existing_membership = rootInterface.getUserMandate(target_user_id, mandateId) @@ -259,7 +261,8 @@ def create_invitation( maxUses=data.maxUses, currentUses=0, inviteUrl="", - emailSent=False + emailSentFlag=False, + emailSentAt=None, ) invitation = Invitation( @@ -281,7 +284,8 @@ def create_invitation( inviteUrl = f"{baseUrl}/invite/{invitation.token}" # Send email if email address is provided - emailSent = False + emailSentFlag = False + emailSentAt: Optional[float] = None if email_val: try: from modules.connectors.connectorMessagingEmail import ConnectorMessagingEmail @@ -319,20 +323,22 @@ def create_invitation( subject=emailSubject, message=emailBody ) - emailSent = True + emailSentFlag = True + emailSentAt = getUtcTimestamp() logger.info(f"Invitation email sent to {email_val} for user {target_username_val or 'email-only'}") except Exception as emailError: logger.warning(f"Failed to send invitation email to {email_val}: {emailError}") # Don't fail the invitation creation if email fails - # Update the invitation record with emailSent status - if emailSent: + # Persist email-sent state on the invitation record + if emailSentFlag: rootInterface.db.recordModify( Invitation, createdRecord.get("id"), - {"emailSent": True} + {"emailSentFlag": True, "emailSentAt": emailSentAt}, ) - createdRecord["emailSent"] = True + createdRecord["emailSentFlag"] = True + createdRecord["emailSentAt"] = emailSentAt # If the target user already exists (identified by username), create an in-app notification # Only look up by username - email is not used for "existing user" since new users are invited by email @@ -384,7 +390,8 @@ def create_invitation( maxUses=createdRecord.get("maxUses", 1), currentUses=createdRecord.get("currentUses", 0), inviteUrl=inviteUrl, - emailSent=emailSent + emailSentFlag=emailSentFlag, + emailSentAt=emailSentAt, ) except HTTPException: @@ -415,7 +422,9 @@ def list_invitations( Requires Mandate-Admin role. Returns all invitations created for this mandate. NOTE: Cannot use db.getRecordsetPaginated() because: - - Computed status fields (isExpired, isUsedUp) are derived in-memory + - Computed status fields (`expiredFlag`, `usedUpFlag`) are derived from + Pydantic computed fields (`Invitation.expiredFlag` / `usedUpFlag`), + `model_dump()` evaluates them on every read. - Filtering by revoked/used/expired requires post-fetch logic - Invitation volume per mandate is typically low (< 100) When this endpoint needs FormGeneratorTable pagination, add PaginatedResponse @@ -455,11 +464,11 @@ def list_invitations( continue baseUrl = frontendUrl.rstrip("/") if frontendUrl else "" inviteUrl = f"{baseUrl}/invite/{inv.token}" if baseUrl else "" + # `model_dump()` includes the computed fields (`expiredFlag`, `usedUpFlag`) + # defined on Invitation — no manual computation needed here. items.append({ **inv.model_dump(), "inviteUrl": inviteUrl, - "isExpired": expiresAt < currentTime, - "isUsedUp": currentUses >= maxUses }) return items diff --git a/modules/routes/routeWorkflowDashboard.py b/modules/routes/routeWorkflowDashboard.py index 998c47a7..d83ce1b2 100644 --- a/modules/routes/routeWorkflowDashboard.py +++ b/modules/routes/routeWorkflowDashboard.py @@ -12,6 +12,8 @@ import asyncio import json import logging import math +import re +import time from typing import Optional, List from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException from fastapi.responses import StreamingResponse @@ -183,6 +185,331 @@ def _parsePaginationOr400(pagination: Optional[str]) -> Optional[PaginationParam ) +_RUN_STATS_SUBQUERY = """ +( + SELECT s."workflowId" AS "workflowId", + MAX(COALESCE(s."startedAt", s."sysCreatedAt")) AS "lastStartedAt", + COUNT(s."id")::bigint AS "runCount", + MAX(CASE WHEN s."status" IN ('running', 'paused') THEN s."id" END) AS "activeRunId" + FROM "AutoRun" s + GROUP BY s."workflowId" +) rs +""" + + +def _firstFkSortFieldForWorkflows(pagination) -> Optional[str]: + """First sort field that requires FK label resolution (cross-DB), or None.""" + from modules.routes.routeHelpers import _buildLabelResolversFromModel + if not pagination or not pagination.sort: + return None + resolvers = _buildLabelResolversFromModel(AutoWorkflow) + if not resolvers: + return None + for sf in pagination.sort: + sfField = sf.get("field") if isinstance(sf, dict) else getattr(sf, "field", None) + if sfField and sfField in resolvers: + return sfField + return None + + +def _batchRunStatsForWorkflowIds(db: DatabaseConnector, workflowIds: List[str]) -> dict: + """One grouped query: lastStartedAt, runCount, activeRunId per workflow.""" + if not workflowIds or not db._ensureTableExists(AutoRun): + return {} + db._ensure_connection() + sql = """ +SELECT "workflowId", + MAX(COALESCE("startedAt", "sysCreatedAt")) AS "lastStartedAt", + COUNT("id")::bigint AS "runCount", + MAX(CASE WHEN "status" IN ('running', 'paused') THEN "id" END) AS "activeRunId" +FROM "AutoRun" +WHERE "workflowId" = ANY(%s) +GROUP BY "workflowId" +""" + out: dict = {} + with db.connection.cursor() as cursor: + cursor.execute(sql, (workflowIds,)) + for row in cursor.fetchall(): + r = dict(row) + wid = r.get("workflowId") + if wid: + out[str(wid)] = r + return out + + +def _listingColSql(key: str, wfFieldNames: set) -> Optional[str]: + if key == "lastStartedAt": + return 'rs."lastStartedAt"' + if key == "runCount": + return 'COALESCE(rs."runCount", 0::bigint)' + if key == "isRunning": + return '(rs."activeRunId" IS NOT NULL)' + if key in wfFieldNames: + return f'w."{key}"' + return None + + +def _listingOrderExpr(key: str, wfFieldNames: set, wfFields: dict) -> Optional[str]: + if key == "lastStartedAt": + return 'rs."lastStartedAt"' + if key == "runCount": + return 'COALESCE(rs."runCount", 0::bigint)' + if key == "isRunning": + return 'CASE WHEN rs."activeRunId" IS NOT NULL THEN 1 ELSE 0 END' + if key in wfFieldNames: + colType = wfFields.get(key, "TEXT") + if colType == "BOOLEAN": + return f'COALESCE(w."{key}", FALSE)' + return f'w."{key}"' + return None + + +def _appendJoinedListingFilters(whereParts: list, values: list, pagination, wfFields: dict) -> None: + """Append WHERE fragments for joined workflow listing (w + rs).""" + from datetime import datetime as _dt, timezone as _tz + + wfFieldNames = set(wfFields.keys()) + validCols = wfFieldNames | {"lastStartedAt", "runCount", "isRunning"} + + if not pagination or not pagination.filters: + return + + for key, val in pagination.filters.items(): + if key == "search" and isinstance(val, str) and val.strip(): + term = f"%{val.strip()}%" + textCols = [c for c, t in wfFields.items() if t == "TEXT"] + if textCols: + orParts = [f'COALESCE(w."{c}"::TEXT, \'\') ILIKE %s' for c in textCols] + whereParts.append(f"({' OR '.join(orParts)})") + values.extend([term] * len(textCols)) + continue + + if key not in validCols: + continue + + if key == "isRunning": + if isinstance(val, dict): + op = val.get("operator", "equals") + v = val.get("value", "") + isTrue = str(v).lower() == "true" + if op in ("equals", "eq"): + whereParts.append('(rs."activeRunId" IS NOT NULL)' if isTrue else '(rs."activeRunId" IS NULL)') + elif val is None: + whereParts.append('(rs."activeRunId" IS NULL)') + else: + whereParts.append( + '(rs."activeRunId" IS NOT NULL)' if str(val).lower() == "true" else '(rs."activeRunId" IS NULL)' + ) + continue + + colRef = _listingColSql(key, wfFieldNames) + if not colRef: + continue + + colType = wfFields.get(key, "TEXT") if key in wfFieldNames else ( + "DOUBLE PRECISION" if key == "lastStartedAt" else "BIGINT" if key == "runCount" else "TEXT" + ) + + if val is None: + if key == "lastStartedAt": + whereParts.append(f'({colRef} IS NULL)') + elif key == "runCount": + whereParts.append(f'({colRef} = 0)') + else: + whereParts.append(f'({colRef} IS NULL OR {colRef}::TEXT = \'\')') + continue + + if not isinstance(val, dict): + if colType == "BOOLEAN" or key == "isRunning": + whereParts.append(f'COALESCE({colRef}, FALSE) = %s') + values.append(str(val).lower() == "true") + else: + whereParts.append(f'{colRef}::TEXT ILIKE %s') + values.append(str(val)) + continue + + op = val.get("operator", "equals") + v = val.get("value", "") + if op in ("equals", "eq"): + if colType == "BOOLEAN": + whereParts.append(f'COALESCE({colRef}, FALSE) = %s') + values.append(str(v).lower() == "true") + else: + whereParts.append(f'{colRef}::TEXT = %s') + values.append(str(v)) + elif op == "contains": + whereParts.append(f'{colRef}::TEXT ILIKE %s') + values.append(f"%{v}%") + elif op == "startsWith": + whereParts.append(f'{colRef}::TEXT ILIKE %s') + values.append(f"{v}%") + elif op == "endsWith": + whereParts.append(f'{colRef}::TEXT ILIKE %s') + values.append(f"%{v}") + elif op in ("gt", "gte", "lt", "lte"): + sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op] + if colType in ("INTEGER", "DOUBLE PRECISION", "BIGINT") or key in ("lastStartedAt", "runCount"): + try: + whereParts.append(f'{colRef}::double precision {sqlOp} %s') + values.append(float(v)) + except (ValueError, TypeError): + continue + else: + whereParts.append(f'{colRef}::TEXT {sqlOp} %s') + values.append(str(v)) + elif op == "between": + fromVal = v.get("from", "") if isinstance(v, dict) else "" + toVal = v.get("to", "") if isinstance(v, dict) else "" + if not fromVal and not toVal: + continue + isNumericCol = colType in ("INTEGER", "DOUBLE PRECISION", "BIGINT") or key in ("lastStartedAt", "runCount") + isDateVal = bool(fromVal and re.match(r"^\d{4}-\d{2}-\d{2}$", str(fromVal))) or bool( + toVal and re.match(r"^\d{4}-\d{2}-\d{2}$", str(toVal)) + ) + if isNumericCol and isDateVal: + if fromVal and toVal: + fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace( + hour=23, minute=59, second=59, tzinfo=_tz.utc + ).timestamp() + whereParts.append(f"({colRef} >= %s AND {colRef} <= %s)") + values.extend([fromTs, toTs]) + elif fromVal: + fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp() + whereParts.append(f"({colRef} >= %s)") + values.append(fromTs) + else: + toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace( + hour=23, minute=59, second=59, tzinfo=_tz.utc + ).timestamp() + whereParts.append(f"({colRef} <= %s)") + values.append(toTs) + elif isNumericCol: + try: + if fromVal and toVal: + whereParts.append( + f"({colRef}::double precision >= %s AND {colRef}::double precision <= %s)" + ) + values.extend([float(fromVal), float(toVal)]) + elif fromVal: + whereParts.append(f"{colRef}::double precision >= %s") + values.append(float(fromVal)) + elif toVal: + whereParts.append(f"{colRef}::double precision <= %s") + values.append(float(toVal)) + except (ValueError, TypeError): + continue + else: + if fromVal and toVal: + whereParts.append(f"({colRef}::TEXT >= %s AND {colRef}::TEXT <= %s)") + values.extend([str(fromVal), str(toVal)]) + elif fromVal: + whereParts.append(f"{colRef}::TEXT >= %s") + values.append(str(fromVal)) + elif toVal: + whereParts.append(f"{colRef}::TEXT <= %s") + values.append(str(toVal)) + + +def _buildJoinedWorkflowWhereOrderLimit( + recordFilter: dict, + pagination, + wfFields: dict, +) -> tuple: + """WHERE / ORDER BY / LIMIT for joined AutoWorkflow + run stats listing.""" + wfFieldNames = set(wfFields.keys()) + whereParts: list = [] + values: list = [] + + for field, value in (recordFilter or {}).items(): + if value is None: + whereParts.append(f'w."{field}" IS NULL') + elif isinstance(value, list): + whereParts.append(f'w."{field}" = ANY(%s)') + values.append(value) + else: + whereParts.append(f'w."{field}" = %s') + values.append(value) + + _appendJoinedListingFilters(whereParts, values, pagination, wfFields) + + whereClause = " WHERE " + " AND ".join(whereParts) if whereParts else "" + + orderParts: list = [] + if pagination and pagination.sort: + for sf in pagination.sort: + sfField = sf.get("field") if isinstance(sf, dict) else getattr(sf, "field", None) + sfDir = sf.get("direction", "asc") if isinstance(sf, dict) else getattr(sf, "direction", "asc") + if not sfField: + continue + expr = _listingOrderExpr(sfField, wfFieldNames, wfFields) + if not expr: + continue + direction = "DESC" if str(sfDir).lower() == "desc" else "ASC" + orderParts.append(f"{expr} {direction} NULLS LAST") + if not orderParts: + orderParts.append('w."sysCreatedAt" DESC NULLS LAST') + + orderClause = " ORDER BY " + ", ".join(orderParts) + + limitClause = "" + if pagination: + offset = (pagination.page - 1) * pagination.pageSize + limitClause = f" LIMIT {pagination.pageSize} OFFSET {offset}" + + return whereClause, orderClause, limitClause, values + + +def _getWorkflowsJoinedPaginated( + db: DatabaseConnector, + recordFilter: dict, + paginationParams: PaginationParams, +) -> dict: + """SQL listing: AutoWorkflow LEFT JOIN aggregated AutoRun stats (one query + count).""" + from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields + + wfFields = getModelFields(AutoWorkflow) + whereClause, orderClause, limitClause, values = _buildJoinedWorkflowWhereOrderLimit( + recordFilter, paginationParams, wfFields, + ) + countValues = list(values) + + fromSql = f'"AutoWorkflow" w LEFT JOIN {_RUN_STATS_SUBQUERY.strip()} ON rs."workflowId" = w."id"' + + countSql = f"SELECT COUNT(*) AS cnt FROM {fromSql}{whereClause}" + dataSql = f"SELECT w.*, rs.\"lastStartedAt\", rs.\"runCount\", rs.\"activeRunId\" FROM {fromSql}{whereClause}{orderClause}{limitClause}" + + db._ensure_connection() + with db.connection.cursor() as cursor: + cursor.execute(countSql, countValues) + totalItems = int(cursor.fetchone()["cnt"]) + + cursor.execute(dataSql, values) + rawRows = [dict(row) for row in cursor.fetchall()] + + pageSize = paginationParams.pageSize if paginationParams else max(totalItems, 1) + totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0 + + modelFields = AutoWorkflow.model_fields + for record in rawRows: + parseRecordFields(record, wfFields, "table AutoWorkflow joined listing") + for fieldName, fieldType in wfFields.items(): + if fieldType == "JSONB" and fieldName in record and record[fieldName] is None: + fieldInfo = modelFields.get(fieldName) + if fieldInfo: + fieldAnnotation = fieldInfo.annotation + if fieldAnnotation == list or ( + hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is list + ): + record[fieldName] = [] + elif fieldAnnotation == dict or ( + hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is dict + ): + record[fieldName] = {} + + return {"items": rawRows, "totalItems": totalItems, "totalPages": totalPages} + + def _cascadeDeleteAutoWorkflow(db: DatabaseConnector, workflowId: str) -> None: """Delete AutoWorkflow and dependent rows (same order as interfaceDbApp._cascadeDeleteGraphicalEditorData).""" wf_id = workflowId @@ -253,7 +580,7 @@ def get_workflow_runs( paginationParams = PaginationParams( page=page, pageSize=limit, - sort=[{"field": "sysCreatedAt", "direction": "desc"}], + sort=[{"field": "startedAt", "direction": "desc"}], ) from modules.routes.routeHelpers import getRecordsetPaginatedWithFkSort @@ -435,20 +762,10 @@ def get_system_workflows( sort=[{"field": "sysCreatedAt", "direction": "desc"}], ) - from modules.routes.routeHelpers import getRecordsetPaginatedWithFkSort - result = getRecordsetPaginatedWithFkSort( - db, AutoWorkflow, - pagination=paginationParams, - recordFilter=recordFilter if recordFilter else None, - ) - pageItems = result.get("items", []) if isinstance(result, dict) else result.items - totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems - totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages - from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels - # Resolve featureCode in same pass as instance labels — need full FI object featureCodeMap: dict = {} + def _resolveInstanceLabelsWithFeatureCode(ids): from modules.interfaces.interfaceDbApp import getRootInterface as _getRI from modules.interfaces.interfaceFeatures import getFeatureInterface @@ -471,59 +788,140 @@ def get_system_workflows( userMandateIds = _getUserMandateIds(userId) adminMandateIds = _getAdminMandateIds(userId, userMandateIds) - workflowIds = [w.get("id") for w in pageItems if w.get("id")] - activeRunMap: dict = {} - runCountMap: dict = {} - lastStartedMap: dict = {} - if workflowIds and db._ensureTableExists(AutoRun): - for wfId in workflowIds: - runs = db.getRecordset(AutoRun, recordFilter={"workflowId": wfId}) - runCountMap[wfId] = len(runs) - for r in runs: - rDict = dict(r) - ts = rDict.get("sysCreatedAt") - if ts and (lastStartedMap.get(wfId) is None or ts > lastStartedMap.get(wfId)): - lastStartedMap[wfId] = ts - if rDict.get("status") in ("running", "paused"): - activeRunMap[wfId] = rDict.get("id") - - items = [] - for w in pageItems: - row = dict(w) - wMandateId = row.get("mandateId") - wfId = row.get("id") - row["isRunning"] = wfId in activeRunMap - row["activeRunId"] = activeRunMap.get(wfId) - row["runCount"] = runCountMap.get(wfId, 0) - row["lastStartedAt"] = lastStartedMap.get(wfId) - - if context.isPlatformAdmin: - row["canEdit"] = True - row["canDelete"] = True - row["canExecute"] = True - elif wMandateId and wMandateId in adminMandateIds: - row["canEdit"] = True - row["canDelete"] = True - row["canExecute"] = True + fkSortField = _firstFkSortFieldForWorkflows(paginationParams) + if fkSortField: + from modules.routes.routeHelpers import getRecordsetPaginatedWithFkSort, applyFiltersAndSort + _COMPUTED_FIELDS = {"lastStartedAt", "runCount", "isRunning"} + hasComputedFilter = bool( + paginationParams.filters + and any(k in _COMPUTED_FIELDS for k in paginationParams.filters) + ) + hasComputedSort = any( + (s.field if hasattr(s, "field") else s.get("field", "")) in _COMPUTED_FIELDS + for s in (paginationParams.sort or []) + ) + dbPagination = paginationParams + if hasComputedFilter or hasComputedSort: + dbFilters = { + k: v for k, v in (paginationParams.filters or {}).items() + if k not in _COMPUTED_FIELDS + } or None + dbSort = [ + s for s in (paginationParams.sort or []) + if (s.field if hasattr(s, "field") else s.get("field", "")) not in _COMPUTED_FIELDS + ] + dbPagination = PaginationParams.model_construct( + page=1, + pageSize=9999, + sort=dbSort or [{"field": "sysCreatedAt", "direction": "desc"}], + filters=dbFilters, + ) + result = getRecordsetPaginatedWithFkSort( + db, AutoWorkflow, + pagination=dbPagination, + recordFilter=recordFilter if recordFilter else None, + ) + pageItems = result.get("items", []) if isinstance(result, dict) else result.items + workflowIds = [w.get("id") for w in pageItems if w.get("id")] + statsById = _batchRunStatsForWorkflowIds(db, workflowIds) + items = [] + for w in pageItems: + row = dict(w) + wfId = row.get("id") + st = statsById.get(str(wfId)) if wfId else None + activeRunId = st.get("activeRunId") if st else None + row["isRunning"] = bool(activeRunId) + row["activeRunId"] = activeRunId + row["runCount"] = int(st.get("runCount") or 0) if st else 0 + row["lastStartedAt"] = float(st["lastStartedAt"]) if st and st.get("lastStartedAt") is not None else None + wMandateId = row.get("mandateId") + if context.isPlatformAdmin: + row["canEdit"] = True + row["canDelete"] = True + row["canExecute"] = True + elif wMandateId and wMandateId in adminMandateIds: + row["canEdit"] = True + row["canDelete"] = True + row["canExecute"] = True + else: + row["canEdit"] = False + row["canDelete"] = False + row["canExecute"] = False + row.pop("graph", None) + items.append(row) + enrichRowsWithFkLabels( + items, + labelResolvers={ + "mandateId": resolveMandateLabels, + "featureInstanceId": _resolveInstanceLabelsWithFeatureCode, + }, + ) + for row in items: + row["instanceLabel"] = row.pop("featureInstanceIdLabel", None) + row["mandateLabel"] = row.pop("mandateIdLabel", None) + row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId")) + if hasComputedFilter or hasComputedSort: + computedFilters = { + k: v for k, v in (paginationParams.filters or {}).items() + if k in _COMPUTED_FIELDS + } + computedSort = [ + s for s in (paginationParams.sort or []) + if (s.field if hasattr(s, "field") else s.get("field", "")) in _COMPUTED_FIELDS + ] + computedPagination = PaginationParams.model_construct( + page=paginationParams.page, + pageSize=paginationParams.pageSize, + sort=computedSort or [], + filters=computedFilters or None, + ) + filtered = applyFiltersAndSort(items, computedPagination) + totalItems = filtered.get("totalItems", len(items)) + totalPages = filtered.get("totalPages", 1) + items = filtered.get("items", items) else: - row["canEdit"] = False - row["canDelete"] = False - row["canExecute"] = False - - row.pop("graph", None) - items.append(row) - - enrichRowsWithFkLabels( - items, - labelResolvers={ - "mandateId": resolveMandateLabels, - "featureInstanceId": _resolveInstanceLabelsWithFeatureCode, - }, - ) - for row in items: - row["instanceLabel"] = row.pop("featureInstanceIdLabel", None) - row["mandateLabel"] = row.pop("mandateIdLabel", None) - row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId")) + totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems + totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages + else: + result = _getWorkflowsJoinedPaginated( + db, recordFilter if recordFilter else {}, paginationParams, + ) + pageItems = result.get("items", []) + totalItems = result.get("totalItems", 0) + totalPages = result.get("totalPages", 0) + items = [] + for row in pageItems: + wMandateId = row.get("mandateId") + wfId = row.get("id") + activeRunId = row.get("activeRunId") + if row.get("runCount") is not None: + row["runCount"] = int(row["runCount"]) + row["isRunning"] = bool(activeRunId) + if context.isPlatformAdmin: + row["canEdit"] = True + row["canDelete"] = True + row["canExecute"] = True + elif wMandateId and wMandateId in adminMandateIds: + row["canEdit"] = True + row["canDelete"] = True + row["canExecute"] = True + else: + row["canEdit"] = False + row["canDelete"] = False + row["canExecute"] = False + row.pop("graph", None) + items.append(row) + enrichRowsWithFkLabels( + items, + labelResolvers={ + "mandateId": resolveMandateLabels, + "featureInstanceId": _resolveInstanceLabelsWithFeatureCode, + }, + ) + for row in items: + row["instanceLabel"] = row.pop("featureInstanceIdLabel", None) + row["mandateLabel"] = row.pop("mandateIdLabel", None) + row["featureCode"] = featureCodeMap.get(row.get("featureInstanceId")) return { "items": items, @@ -593,6 +991,23 @@ def delete_system_workflow( # Filter-values endpoints (for FormGeneratorTable column filters) # --------------------------------------------------------------------------- +_SYNTHETIC_TIMESTAMP_FIELDS = {"lastStartedAt"} + + +def _isTimestampColumn(modelClass, column: str) -> bool: + """Check if a column is a timestamp field (PeriodPicker, no discrete values needed).""" + if column in _SYNTHETIC_TIMESTAMP_FIELDS: + return True + fields = getattr(modelClass, "model_fields", {}) + fieldInfo = fields.get(column) + if not fieldInfo: + return False + extra = getattr(fieldInfo, "json_schema_extra", None) + if isinstance(extra, dict): + return extra.get("frontend_type") == "timestamp" + return False + + def _enrichedFilterValues( db, context: RequestContext, modelClass, scopeFilter, column: str, ): @@ -602,6 +1017,9 @@ def _enrichedFilterValues( objects so the frontend can display human-readable labels in the dropdown without a separate source fk fetch. Non-FK columns return ``string | null``. + Timestamp columns (sysCreatedAt, lastStartedAt) return an empty list because + the frontend uses a PeriodPicker (range selector) — no discrete values needed. + ``null`` is included when rows with NULL/empty values exist (enables the "(Leer)" filter option). @@ -610,6 +1028,9 @@ def _enrichedFilterValues( from fastapi.responses import JSONResponse from modules.routes.routeHelpers import resolveMandateLabels, resolveInstanceLabels + if _isTimestampColumn(modelClass, column): + return JSONResponse(content=[]) + if column in ("mandateLabel", "mandateId"): baseFilter = scopeFilter(context) recordFilter = dict(baseFilter) if baseFilter else {} @@ -828,7 +1249,10 @@ def stop_workflow_run( currentStatus = run.get("status", "") if currentStatus in ("completed", "failed", "stopped"): return {"status": currentStatus, "runId": runId, "message": "Run already finished"} - db.recordModify(AutoRun, runId, {"status": "stopped"}) + stopUpdates = {"status": "stopped"} + if not run.get("completedAt"): + stopUpdates["completedAt"] = time.time() + db.recordModify(AutoRun, runId, stopUpdates) return {"status": "stopped", "runId": runId, "message": "Run not active in memory, marked as stopped"} return {"status": "stopping", "runId": runId, "message": "Stop signal sent"} diff --git a/modules/serviceCenter/services/serviceGeneration/renderers/_pdfFontFallback.py b/modules/serviceCenter/services/serviceGeneration/renderers/_pdfFontFallback.py new file mode 100644 index 00000000..8603c78f --- /dev/null +++ b/modules/serviceCenter/services/serviceGeneration/renderers/_pdfFontFallback.py @@ -0,0 +1,145 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Inline emoji-font fallback for the ReportLab-based PDF renderer. + +The default ReportLab core fonts (Helvetica, Times, Courier) only cover +WinAnsi (Latin-1 + a handful of typographic glyphs). Codepoints from the +Unicode Symbols / Pictographs / Emoji blocks render as a missing-glyph +square ("tofu") or are dropped entirely. + +This module bundles a single TrueType emoji font (Noto Emoji, monochrome, +SIL Open Font License) and exposes `wrapEmojiSpansInXml` which rewrites +already-built ReportLab mini-XML so any character that the emoji font can +draw is wrapped in `...`. ReportLab's +Paragraph parser supports nested tags, so emoji spans nest cleanly +inside , , and markup produced elsewhere. + +ReportLab does not natively color emoji (CBDT/COLR/SBIX glyph tables are +not honoured by its TTF backend) — Noto Emoji is intentionally a +monochrome outline font, which is the only flavour that will render at all. +""" + +from __future__ import annotations + +import logging +import os +import re +from typing import FrozenSet, Optional + +logger = logging.getLogger(__name__) + +EMOJI_FONT_NAME = "NotoEmoji" + +_RENDERER_DIR = os.path.dirname(os.path.abspath(__file__)) +_GATEWAY_ROOT = os.path.abspath(os.path.join(_RENDERER_DIR, "..", "..", "..", "..", "..")) +_FONT_PATH = os.path.join(_GATEWAY_ROOT, "assets", "fonts", "NotoEmoji-Regular.ttf") + +# Below 0x2000 the WinAnsi-style core fonts already cover Latin-1, common +# diacritics and basic punctuation. We only swap to the emoji font for +# higher codepoints so umlauts, copyright, NBSP, etc. stay visually +# consistent with surrounding body text. +_EMOJI_RANGE_START = 0x2000 + +_supportedCodepoints: Optional[FrozenSet[int]] = None +_initAttempted = False + + +def _initialize() -> bool: + """Register the emoji TTF with ReportLab and capture its cmap. + + Lazy + idempotent: the renderer may instantiate before reportlab is + imported in the worker process, and tests that don't generate PDFs + must not pay the registration cost. + """ + global _initAttempted, _supportedCodepoints + if _initAttempted: + return _supportedCodepoints is not None + _initAttempted = True + + if not os.path.exists(_FONT_PATH): + logger.warning( + "Emoji font not found at %s — emoji codepoints in PDFs will render as tofu", + _FONT_PATH, + ) + return False + + try: + from reportlab.pdfbase import pdfmetrics + from reportlab.pdfbase.ttfonts import TTFont + except ImportError: + logger.warning("reportlab not installed; cannot register emoji font") + return False + + try: + font = TTFont(EMOJI_FONT_NAME, _FONT_PATH) + pdfmetrics.registerFont(font) + # `face.charToGlyph` is built lazily on first use; force population + # so the mapping is available for our coverage check below. + cmap = getattr(font.face, "charToGlyph", None) or {} + if not cmap: + from fontTools.ttLib import TTFont as FtTTFont + cmap = FtTTFont(_FONT_PATH).getBestCmap() + _supportedCodepoints = frozenset( + cp for cp in cmap.keys() if cp >= _EMOJI_RANGE_START + ) + logger.info( + "Registered emoji font '%s' with %d renderable codepoints (>= U+%04X)", + EMOJI_FONT_NAME, + len(_supportedCodepoints), + _EMOJI_RANGE_START, + ) + return True + except Exception as exc: + logger.warning("Failed to register emoji font: %s", exc) + _supportedCodepoints = None + return False + + +_TAG_RE = re.compile(r"<[^>]+>") + + +def wrapEmojiSpansInXml(xml: str) -> str: + """Wrap consecutive emoji codepoints with . + + Operates on already-XML-escaped ReportLab markup. Tag markers + (`<...>`) are skipped so we never insert a font tag inside another + tag's attribute list. Codepoints that the emoji font cannot draw + pass through unchanged so the default body font still gets a chance + (e.g. U+200D zero-width-joiner has no glyph in Noto Emoji and would + otherwise render as tofu inside a forced span). + """ + if not xml: + return xml + if not _initialize() or not _supportedCodepoints: + return xml + + cps = _supportedCodepoints + out: list[str] = [] + i = 0 + n = len(xml) + while i < n: + # Skip past any markup tag verbatim — emojis inside attribute + # values would be unusual but harmless; the simpler invariant + # "we never split a `<...>` token" keeps the rewrite safe. + if xml[i] == "<": + tagEnd = xml.find(">", i) + if tagEnd == -1: + out.append(xml[i:]) + break + out.append(xml[i : tagEnd + 1]) + i = tagEnd + 1 + continue + + if ord(xml[i]) in cps: + j = i + while j < n and xml[j] != "<" and ord(xml[j]) in cps: + j += 1 + out.append(f'') + out.append(xml[i:j]) + out.append("") + i = j + continue + + out.append(xml[i]) + i += 1 + return "".join(out) diff --git a/modules/serviceCenter/services/serviceGeneration/renderers/rendererPdf.py b/modules/serviceCenter/services/serviceGeneration/renderers/rendererPdf.py index a5c9dc93..df2aff10 100644 --- a/modules/serviceCenter/services/serviceGeneration/renderers/rendererPdf.py +++ b/modules/serviceCenter/services/serviceGeneration/renderers/rendererPdf.py @@ -27,6 +27,8 @@ except ImportError: import re as _re_pdf +from ._pdfFontFallback import wrapEmojiSpansInXml as _wrapEmojiSpansInXml + # A4 width in pt; margins must match SimpleDocTemplate(leftMargin/rightMargin) _PDF_MARGIN_LR_PT = 72.0 _PDF_A4_WIDTH_PT = 595.27 @@ -622,6 +624,8 @@ class RendererPdf(BaseRenderer): """Turn common markdown inline (**bold**, *italic*, `code`) into ReportLab XML. Backtick spans are extracted first so paths like `...//...` are not corrupted by markdown patterns and XML escaping stays well-formed inside . + Emoji codepoints are wrapped in ... so they render + as monochrome glyphs instead of missing-glyph squares from the WinAnsi core fonts. """ if not text: return "" @@ -635,7 +639,7 @@ class RendererPdf(BaseRenderer): out.append(f'{self._escapeReportlabXml(code)}') pos = m.end() out.append(self._applyInlineMarkdownToEscapedPlain(text[pos:])) - return "".join(out) + return _wrapEmojiSpansInXml("".join(out)) def _paragraphFromInlineMarkdown(self, text: str, style: ParagraphStyle) -> Paragraph: return Paragraph(self._markdownInlineToReportlabXml(text), style) diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py index b949a1b4..35d94d2e 100644 --- a/modules/shared/attributeUtils.py +++ b/modules/shared/attributeUtils.py @@ -311,6 +311,61 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag attributes.append(attr_def) + # Pydantic v2 computed fields (@computed_field). These are read-only properties + # serialized into ``model_dump()`` and exposed to the frontend as ordinary + # attributes so resolveColumnTypes() can pick them up (label / type / format + # labels / options). No DB persistence — they are derived from regular fields. + computedFields = getattr(modelClass, "model_computed_fields", {}) or {} + for name, computedInfo in computedFields.items(): + jsonExtra = getattr(computedInfo, "json_schema_extra", None) or {} + if callable(jsonExtra): + jsonExtra = {} + + frontendType = jsonExtra.get("frontend_type") if isinstance(jsonExtra, dict) else None + if not frontendType: + returnType = getattr(computedInfo, "return_type", None) + typeName = getattr(returnType, "__name__", None) or str(returnType) + frontendType = "checkbox" if typeName == "bool" else "text" + + frontendVisible = ( + jsonExtra.get("frontend_visible", True) if isinstance(jsonExtra, dict) else True + ) + frontendFormat = ( + jsonExtra.get("frontend_format") if isinstance(jsonExtra, dict) else None + ) + frontendFormatLabels = ( + jsonExtra.get("frontend_format_labels") if isinstance(jsonExtra, dict) else None + ) + frontendOptions = ( + jsonExtra.get("frontend_options") if isinstance(jsonExtra, dict) else None + ) + + attrDef = { + "name": name, + "type": frontendType, + "required": False, + "description": str(getattr(computedInfo, "description", "") or ""), + "label": labels.get(name, name), + "placeholder": "", + "editable": False, + "visible": frontendVisible, + "order": len(attributes), + "readonly": True, + "options": _resolveOptionLabels(frontendOptions), + "default": None, + } + + if frontendFormat: + attrDef["frontendFormat"] = frontendFormat + if frontendFormatLabels and isinstance(frontendFormatLabels, list): + from modules.shared.i18nRegistry import resolveText + attrDef["frontendFormatLabels"] = [ + resolveText(lbl) if isinstance(lbl, (str, dict)) else str(lbl) + for lbl in frontendFormatLabels + ] + + attributes.append(attrDef) + return {"model": model_label, "attributes": attributes} diff --git a/modules/shared/fkRegistry.py b/modules/shared/fkRegistry.py index bf869bf4..9f3d63c4 100644 --- a/modules/shared/fkRegistry.py +++ b/modules/shared/fkRegistry.py @@ -82,6 +82,11 @@ class FkRelationship: targetDb: str targetTable: str targetColumn: str + # Soft references hold sentinel / lineage values that are intentionally + # not backed by a DB row (e.g. AutoWorkflow.templateSourceId can store an + # in-code template ID like "trustee-receipt-import"). The orphan scanner + # MUST skip these to avoid deleting valid records. + softFk: bool = False def _buildTableToDbMap() -> Dict[str, str]: @@ -192,6 +197,7 @@ def _discoverFkRelationships() -> List[FkRelationship]: targetDb = fkTarget.get("db", "") targetTable = fkTarget.get("table", "") targetColumn = fkTarget.get("column", "id") + softFk = bool(fkTarget.get("softFk", False)) if not targetDb or not targetTable: continue @@ -204,6 +210,7 @@ def _discoverFkRelationships() -> List[FkRelationship]: targetDb=targetDb, targetTable=targetTable, targetColumn=targetColumn, + softFk=softFk, ) ) diff --git a/modules/shared/i18nRegistry.py b/modules/shared/i18nRegistry.py index f32315d6..7e620f8d 100644 --- a/modules/shared/i18nRegistry.py +++ b/modules/shared/i18nRegistry.py @@ -226,6 +226,30 @@ def i18nModel(modelLabel: str, aiContext: str = ""): if isinstance(token, str) and token.strip(): t(token, fmtCtx, "") + # Pydantic v2 computed fields (@computed_field) — same handling as + # regular model_fields so labels and frontend_format_labels are + # registered for i18n and appear in MODEL_LABELS. + computedFields = getattr(cls, "model_computed_fields", {}) or {} + for fieldName, computedInfo in computedFields.items(): + extra = getattr(computedInfo, "json_schema_extra", None) + if callable(extra) or not isinstance(extra, dict): + attributes.setdefault(fieldName, fieldName) + continue + label = extra.get("label") + if label: + desc = getattr(computedInfo, "description", "") or "" + t(label, f"table.{className}.{fieldName}", desc) + attributes[fieldName] = label + else: + attributes.setdefault(fieldName, fieldName) + + formatLabels = extra.get("frontend_format_labels") + if isinstance(formatLabels, list): + fmtCtx = f"table.{className}.{fieldName}.format" + for token in formatLabels: + if isinstance(token, str) and token.strip(): + t(token, fmtCtx, "") + MODEL_LABELS[className] = { "model": modelLabel, "attributes": attributes, @@ -610,6 +634,7 @@ def _registerDatamodelOptionLabels(): "modules.datamodels.datamodelDataSource", "modules.datamodels.datamodelFeatureDataSource", "modules.datamodels.datamodelUiLanguage", + "modules.datamodels.datamodelViews", "modules.features.trustee.datamodelFeatureTrustee", "modules.features.neutralization.datamodelFeatureNeutralizer", ) diff --git a/modules/system/databaseHealth.py b/modules/system/databaseHealth.py index 8a902e5f..91f26225 100644 --- a/modules/system/databaseHealth.py +++ b/modules/system/databaseHealth.py @@ -339,6 +339,14 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]: try: for rel in relationships: try: + if rel.softFk: + logger.debug( + "Skipping soft FK %s.%s.%s -> %s.%s.%s", + rel.sourceDb, rel.sourceTable, rel.sourceColumn, + rel.targetDb, rel.targetTable, rel.targetColumn, + ) + continue + sourceTables = _existingTables(rel.sourceDb) if rel.sourceTable not in sourceTables: continue @@ -458,6 +466,12 @@ def _cleanOrphans(db: str, table: str, column: str, force: bool = False) -> int: ) if rel is None: raise ValueError(f"No FK relationship found for {db}.{table}.{column}") + if rel.softFk: + raise OrphanCleanupRefused( + f"Refusing cleanup: {rel.sourceDb}.{rel.sourceTable}.{rel.sourceColumn} is " + f"declared as a soft FK (sentinel / lineage reference) and is intentionally " + f"excluded from orphan deletion." + ) conn = _getConnection(rel.sourceDb) targetConn = None @@ -571,18 +585,38 @@ def _cleanOrphans(db: str, table: str, column: str, force: bool = False) -> int: return deleted -def _cleanAllOrphans(force: bool = False) -> List[dict]: +def _isUserIdFk(targetTable: str, targetColumn: str) -> bool: + """Match the UserInDB.id reference exactly (case-insensitive on table name). + + Orphans pointing at deleted users are a distinct category: they accumulate + naturally on every audit / billing / membership row when a user is deleted, + and the SysAdmin typically wants to handle them separately from "real" FK + drift. The orphan UI exposes a checkbox `excludeUserFks` that hides them + from the list and skips them in `clean-all`; this helper keeps the rule + in one place so frontend + clean-all + scan stay in lock-step. + """ + return targetTable.lower() == "userindb" and targetColumn == "id" + + +def _cleanAllOrphans(force: bool = False, excludeUserFks: bool = False) -> List[dict]: """Clean all detected orphans. Returns list of {db, table, column, deleted, [error|skipped]}. Safety: each individual cleanup re-validates target row counts at delete-time to avoid cascading wipes (e.g. one delete emptying a target table that the next iteration depends on). Without force=True, dangerous cleanups are skipped. + + When ``excludeUserFks=True``, FK relationships pointing at ``UserInDB.id`` + are skipped entirely — those orphans (deleted-user remnants in audit / + billing / membership tables) are typically handled by a dedicated user + purge workflow, not by generic FK cleanup. """ orphans = _scanOrphans() results = [] for orphan in orphans: if orphan.get("orphanCount", 0) <= 0: continue + if excludeUserFks and _isUserIdFk(orphan.get("targetTable", ""), orphan.get("targetColumn", "")): + continue try: deleted = _cleanOrphans( orphan["sourceDb"], @@ -651,6 +685,8 @@ def _listOrphans( ) if rel is None: raise ValueError(f"No FK relationship found for {db}.{table}.{column}") + if rel.softFk: + return [] safeLimit = max(1, min(int(limit), 10000)) diff --git a/tests/unit/services/test_renderer_pdf_smoke.py b/tests/unit/services/test_renderer_pdf_smoke.py index a3a3a78d..33324572 100644 --- a/tests/unit/services/test_renderer_pdf_smoke.py +++ b/tests/unit/services/test_renderer_pdf_smoke.py @@ -251,3 +251,28 @@ def test_inline_code_angle_brackets_escaped_in_font_span(): xml = renderer._markdownInlineToReportlabXml("unter `Eingabe//` speichern") assert 'name="Courier"' in xml assert "<Slug>" in xml + + +def test_emoji_codepoints_wrapped_in_emoji_font_span(): + """Emoji codepoints must be wrapped in so + ReportLab swaps to the Noto Emoji TTF instead of producing missing-glyph squares.""" + if not REPORTLAB_AVAILABLE: + pytest.skip("reportlab is not installed") + renderer = RendererPdf(services=_fakeServices()) + xml = renderer._markdownInlineToReportlabXml("Status: \U0001F600 done \U0001F389") + # Either the font registered (preferred) and wrapped, or font missing and + # text passes through unchanged. Both branches must keep the body readable. + from modules.serviceCenter.services.serviceGeneration.renderers._pdfFontFallback import ( + _initialize as _emojiInit, + ) + if _emojiInit(): + assert 'name="NotoEmoji"' in xml + assert "\U0001F600" in xml + assert "\U0001F389" in xml + else: + assert "\U0001F600" in xml + # Bold + emoji must produce nested font tag inside ... + xmlBold = renderer._markdownInlineToReportlabXml("**OK \U00002705**") + assert "" in xmlBold and "" in xmlBold + if _emojiInit(): + assert 'name="NotoEmoji"' in xmlBold