This commit is contained in:
ValueOn AG 2026-04-23 23:09:38 +02:00
parent 71f4265e06
commit b6be8f391e
15 changed files with 1052 additions and 389 deletions

View file

@ -4,11 +4,16 @@ Base class for demo configurations.
Each demo config file in this folder extends _BaseDemoConfig and provides
idempotent load() and remove() methods for setting up / tearing down
a complete demo environment (mandates, users, features, test data, etc.).
Subclasses MUST also declare ``credentials`` so the SysAdmin who triggers a
demo-load gets the initial username / password pair shown in the UI -- this
avoids the "where do I find the password?" anti-pattern of having to grep the
source code.
"""
import logging
from abc import ABC, abstractmethod
from typing import Dict, Any
from typing import Any, Dict, List
logger = logging.getLogger(__name__)
@ -20,6 +25,13 @@ class _BaseDemoConfig(ABC):
label: str = ""
description: str = ""
# Each entry describes one bootstrapped login that the demo creates.
# Shape: {"role": "Demo-Sachbearbeiter", "username": "pwg.demo",
# "email": "pwg.demo@poweron.swiss", "password": "pwg.demo.2026"}
# Surfaced via GET /api/admin/demo-config and inside the load() summary
# so the AdminDemoConfigPage can display it (no source-code grep needed).
credentials: List[Dict[str, str]] = []
@abstractmethod
def load(self, db) -> Dict[str, Any]:
"""Create all demo data (idempotent). Returns summary dict."""
@ -35,4 +47,5 @@ class _BaseDemoConfig(ABC):
"code": self.code,
"label": self.label,
"description": self.description,
"credentials": list(self.credentials or []),
}

View file

@ -64,6 +64,14 @@ class InvestorDemo2026(_BaseDemoConfig):
"Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, "
"trustee with RMA, workspace, graph editor, and neutralization."
)
credentials = [
{
"role": "SysAdmin Demo",
"username": _USER["username"],
"email": _USER["email"],
"password": _USER["password"],
}
]
# ------------------------------------------------------------------
# load
@ -101,6 +109,10 @@ class InvestorDemo2026(_BaseDemoConfig):
logger.error(f"Demo load failed: {e}", exc_info=True)
summary["errors"].append(str(e))
# Surface initial credentials so the SysAdmin doesn't have to grep the
# source code -- consumed by AdminDemoConfigPage to render a copyable
# login box in the result banner.
summary["credentials"] = list(self.credentials)
return summary
# ------------------------------------------------------------------
@ -268,10 +280,17 @@ class InvestorDemo2026(_BaseDemoConfig):
logger.error(f"Failed to create feature '{instanceLabel}' ({code}) in {mandateLabel}: {e}")
def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
"""Grant the demo user admin access to every feature instance in the mandate."""
"""Grant the demo user admin access on EVERY feature instance of the
mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role
the user does not see any feature tile in the UI -- so this method
ALSO heals a half-broken state by re-copying the per-feature template
roles if they are missing (e.g. when the instance was created via an
older code path that skipped ``copyTemplateRoles``).
"""
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
from modules.datamodels.datamodelRbac import Role
from modules.interfaces.interfaceFeatures import getFeatureInterface
instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []
@ -297,7 +316,38 @@ class InvestorDemo2026(_BaseDemoConfig):
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
if adminRoles:
# Self-heal: if the per-feature admin role does not exist on this
# instance the template roles were never copied -- copy them now.
if not adminRoles:
logger.warning(
"Feature instance %s (%s) is missing role '%s' -- "
"re-copying template roles", instId, featureCode, adminRoleLabel,
)
try:
fi = getFeatureInterface(db)
fi._copyTemplateRoles(featureCode, mandateId, instId)
summary["created"].append(
f"Repaired template roles for {featureCode} in {mandateLabel}"
)
except Exception as repairErr:
summary["errors"].append(
f"Could not repair template roles for {featureCode} "
f"in {mandateLabel}: {repairErr}"
)
adminRoles = db.getRecordset(Role, recordFilter={
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
if not adminRoles:
summary["errors"].append(
f"Admin role '{adminRoleLabel}' not found for feature "
f"instance {featureCode} in {mandateLabel} -- demo user "
f"will not see this feature."
)
continue
adminRoleId = adminRoles[0].get("id")
existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
"featureAccessId": featureAccessId,
@ -306,6 +356,9 @@ class InvestorDemo2026(_BaseDemoConfig):
if not existingRole:
far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
db.recordCreate(FeatureAccessRole, far)
summary["created"].append(
f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}"
)
logger.info(f"Assigned {adminRoleLabel} role in {mandateLabel}")
def _ensureTrusteeRmaConfig(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):

View file

@ -67,6 +67,14 @@ class PwgDemo2026(_BaseDemoConfig):
"Graph-Editor mit dem Pilot-Workflow für Jahresmietzinsbestätigungen "
"(als File importiert, active=false). Idempotent."
)
credentials = [
{
"role": "Demo-Sachbearbeiter",
"username": _USER["username"],
"email": _USER["email"],
"password": _USER["password"],
}
]
# ------------------------------------------------------------------
# load
@ -98,6 +106,10 @@ class PwgDemo2026(_BaseDemoConfig):
logger.error(f"PWG demo load failed: {e}", exc_info=True)
summary["errors"].append(str(e))
# Surface initial credentials so the SysAdmin doesn't have to grep the
# source code -- consumed by AdminDemoConfigPage to render a copyable
# login box in the result banner.
summary["credentials"] = list(self.credentials)
return summary
# ------------------------------------------------------------------
@ -253,9 +265,17 @@ class PwgDemo2026(_BaseDemoConfig):
summary["errors"].append(f"Feature '{instanceLabel}' in {mandateLabel}: {e}")
def _ensureFeatureAccess(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
"""Grant the demo user admin access on EVERY feature instance of the
mandate. Without an explicit ``FeatureAccess`` + ``{code}-admin`` role
the user does not see any feature tile in the UI -- so this method
ALSO heals a half-broken state by re-copying the per-feature template
roles if they are missing (e.g. when the instance was created via an
older code path that skipped ``copyTemplateRoles``).
"""
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
from modules.datamodels.datamodelRbac import Role
from modules.interfaces.interfaceFeatures import getFeatureInterface
instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) or []
@ -280,7 +300,40 @@ class PwgDemo2026(_BaseDemoConfig):
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
if adminRoles:
# Self-heal: if the per-feature admin role does not exist on this
# instance the template roles were never copied -- copy them now.
if not adminRoles:
logger.warning(
"Feature instance %s (%s) is missing role '%s' -- "
"re-copying template roles", instId, featureCode, adminRoleLabel,
)
try:
fi = getFeatureInterface(db)
fi._copyTemplateRoles(featureCode, mandateId, instId)
summary["created"].append(
f"Repaired template roles for {featureCode} in {mandateLabel}"
)
except Exception as repairErr:
summary["errors"].append(
f"Could not repair template roles for {featureCode} "
f"in {mandateLabel}: {repairErr}"
)
adminRoles = db.getRecordset(Role, recordFilter={
"featureInstanceId": instId,
"roleLabel": adminRoleLabel,
})
if not adminRoles:
# Hard fail surfaced to UI -- without the admin role the user
# would silently not see the instance.
summary["errors"].append(
f"Admin role '{adminRoleLabel}' not found for feature "
f"instance {featureCode} in {mandateLabel} -- demo user "
f"will not see this feature."
)
continue
adminRoleId = adminRoles[0].get("id")
existingRole = db.getRecordset(FeatureAccessRole, recordFilter={
"featureAccessId": featureAccessId,
@ -289,6 +342,9 @@ class PwgDemo2026(_BaseDemoConfig):
if not existingRole:
far = FeatureAccessRole(featureAccessId=featureAccessId, roleId=adminRoleId)
db.recordCreate(FeatureAccessRole, far)
summary["created"].append(
f"Role '{adminRoleLabel}' assigned to demo user in {mandateLabel}"
)
def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], userId: Optional[str], summary: Dict):
if not mandateId or not userId:

View file

@ -79,6 +79,16 @@ async def runSync(
async with _lockFor(featureInstanceId):
started = time.monotonic()
# CRITICAL: ensure the schema cache (especially the per-status
# ``isClosed`` map) is populated BEFORE we iterate issues. Redmine's
# /issues.json endpoint only returns ``{id, name}`` for the status
# object -- the closed/open flag lives in /issue_statuses.json. If
# the cache is empty here, every freshly-synced ticket would land
# with ``isClosed=False`` and the Stats page would be useless.
await _ensureSchemaWarm(currentUser, mandateId, featureInstanceId)
cfg = iface.getConfig(featureInstanceId) # re-read to get warm cache
full = force or cfg.lastSyncAt is None
updated_from_iso: Optional[str] = None
if not full and cfg.lastSyncAt is not None:
@ -107,6 +117,15 @@ async def runSync(
tickets_upserted += _upsertTicket(iface, featureInstanceId, mandateId, issue, now_epoch)
relations_upserted += _replaceRelations(iface, featureInstanceId, issue, now_epoch)
# Self-healing pass: re-apply ``isClosed`` to every mirrored ticket
# using the now-warm schema cache. Fixes pre-existing rows that were
# synced before the cache was populated (cheap; mirror-local only).
flags_fixed = _rebuildIsClosedFromSchema(iface, featureInstanceId, now_epoch)
if flags_fixed:
logger.info(
f"runSync({featureInstanceId}): corrected isClosed on {flags_fixed} mirror rows"
)
duration_ms = int((time.monotonic() - started) * 1000)
iface.recordSyncSuccess(
featureInstanceId,
@ -240,6 +259,80 @@ def _replaceRelations(
return inserted
# ---------------------------------------------------------------------------
# Schema cache warm-up + post-sync isClosed correction
# ---------------------------------------------------------------------------
async def _ensureSchemaWarm(
currentUser: User,
mandateId: Optional[str],
featureInstanceId: str,
) -> None:
"""Make sure ``cfg.schemaCache['statuses']`` exists with the per-status
``isClosed`` flag. Called at the start of every sync because Redmine's
``/issues.json`` doesn't expose ``is_closed`` on the inline status
object, so we MUST resolve it via the schema.
"""
iface = getInterface(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
cfg = iface.getConfig(featureInstanceId)
if cfg is None:
return
statuses = (cfg.schemaCache or {}).get("statuses") or []
if statuses:
return
# Lazy import to avoid a circular dependency at module load.
from modules.features.redmine.serviceRedmine import getProjectMeta
try:
await getProjectMeta(currentUser, mandateId, featureInstanceId, forceRefresh=True)
except Exception as e:
logger.warning(
f"_ensureSchemaWarm({featureInstanceId}): could not warm schema cache: {e} "
"-- isClosed flags may be inaccurate until next successful schema fetch."
)
def _rebuildIsClosedFromSchema(iface, featureInstanceId: str, nowEpoch: float) -> int:
"""Walk the mirror once and fix ``isClosed`` (and ``closedOnTs``) for any
ticket whose stored value disagrees with the current schema cache.
Returns the number of rows that were actually corrected. A no-op when
the schema cache has no statuses (logged once, then the caller can
decide whether to retry).
"""
cfg = iface.getConfig(featureInstanceId)
if cfg is None:
return 0
statuses = (cfg.schemaCache or {}).get("statuses") or []
if not statuses:
return 0
closed_ids = {int(s.get("id")) for s in statuses if s.get("id") is not None and s.get("isClosed")}
rows = iface.listMirroredTickets(featureInstanceId)
corrections = 0
for row in rows:
sid = row.get("statusId")
if sid is None:
continue
should_be_closed = int(sid) in closed_ids
if bool(row.get("isClosed")) == should_be_closed:
continue
# Only the closed/open flag (and the derived closedOnTs) are
# touched here -- everything else came from Redmine and stays.
update = {
"isClosed": bool(should_be_closed),
"closedOnTs": float(row.get("updatedOnTs")) if (should_be_closed and row.get("updatedOnTs") is not None) else None,
"syncedAt": nowEpoch,
}
try:
iface.upsertMirroredTicket(featureInstanceId, int(row.get("redmineId")), {**row, **update})
corrections += 1
except Exception as e:
logger.warning(
f"_rebuildIsClosedFromSchema({featureInstanceId}): could not fix ticket "
f"#{row.get('redmineId')}: {e}"
)
return corrections
# ---------------------------------------------------------------------------
# Pure helpers
# ---------------------------------------------------------------------------

View file

@ -1562,38 +1562,84 @@ async def refresh_chart_of_accounts(
return {"message": f"Chart of accounts refreshed: {len(charts)} entries", "count": len(charts)}
@router.post("/{instanceId}/accounting/sync")
@limiter.limit("5/minute")
async def sync_positions_to_accounting(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
data: Dict[str, Any] = Body(...),
context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
"""Sync positions to the accounting system. Body: { positionIds: [...] }"""
mandateId = _validateInstanceAccess(instanceId, context)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE = "trusteeAccountingPush"
async def _trusteeAccountingPushJobHandler(job: Dict[str, Any], progressCb) -> Dict[str, Any]:
"""BackgroundJob handler: pushes a batch of positions to the external
accounting system. Runs in the worker without blocking the original HTTP
request, so the user can continue navigating while the sync runs.
Reads inputs from `job["payload"]` (`positionIds`) and reports incremental
progress via `progressCb(percent, message)`. The job result has the same
shape that the legacy synchronous endpoint used to return.
"""
from modules.security.rootAccess import getRootUser
from .accounting.accountingBridge import AccountingBridge
instanceId = job["featureInstanceId"]
mandateId = job["mandateId"]
payload = job.get("payload") or {}
positionIds: List[str] = list(payload.get("positionIds") or [])
if not positionIds:
return {"total": 0, "success": 0, "skipped": 0, "errors": 0, "results": []}
rootUser = getRootUser()
interface = getInterface(rootUser, mandateId=mandateId, featureInstanceId=instanceId)
bridge = AccountingBridge(interface)
positionIds = data.get("positionIds", [])
if not positionIds:
raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required"))
results = []
total = len(positionIds)
progressCb(2, f"Sync wird vorbereitet ({total} Position(en))...")
# Resolve connector + plain config once to avoid decryption rate-limits
# (mirrors the optimisation in pushBatchToAccounting). We push positions
# one-by-one inside the job so we can emit incremental progress and so
# one bad row never aborts the rest.
from .accounting.accountingBridge import SyncResult
try:
connector, plainConfig, configRecord = await bridge._resolveConnectorAndConfig(instanceId)
except Exception as resolveErr:
logger.exception("Accounting push: failed to resolve connector/config")
progressCb(100, "Verbindungsaufbau fehlgeschlagen.")
raise resolveErr
if not connector or not plainConfig:
results = [SyncResult(success=False, errorMessage="No active accounting configuration found") for _ in positionIds]
progressCb(100, "Keine aktive Buchhaltungs-Konfiguration gefunden.")
return {
"total": len(results),
"success": 0,
"skipped": 0,
"errors": len(results),
"results": [r.model_dump() for r in results],
}
for index, positionId in enumerate(positionIds, start=1):
result = await bridge.pushPositionToAccounting(
instanceId,
positionId,
_resolvedConnector=connector,
_resolvedPlainConfig=plainConfig,
_resolvedConfigRecord=configRecord,
)
results.append(result)
# Reserve 5..95% for the push loop, keep the tail for summary.
pct = 5 + int(90 * index / total)
progressCb(pct, f"Position {index}/{total} verarbeitet")
results = await bridge.pushBatchToAccounting(instanceId, positionIds)
skipped = [r for r in results if not r.success and r.errorMessage and "already synced" in r.errorMessage]
failed = [r for r in results if not r.success and r not in skipped]
if skipped:
logger.info(
"Accounting sync: %s position(s) already synced, skipped",
len(skipped),
)
logger.info("Accounting sync: %s position(s) already synced, skipped", len(skipped))
if failed:
logger.warning(
"Accounting sync had %s failure(s): %s",
len(failed),
"; ".join(r.errorMessage or "unknown" for r in failed[:3]),
)
progressCb(100, "Sync abgeschlossen.")
return {
"total": len(results),
"success": sum(1 for r in results if r.success),
@ -1603,6 +1649,50 @@ async def sync_positions_to_accounting(
}
try:
from modules.serviceCenter.services.serviceBackgroundJobs import registerJobHandler as _registerPushJobHandler
_registerPushJobHandler(TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE, _trusteeAccountingPushJobHandler)
except Exception as _pushRegErr:
logger.warning("Failed to register trusteeAccountingPush job handler: %s", _pushRegErr)
@router.post("/{instanceId}/accounting/sync", status_code=status.HTTP_202_ACCEPTED)
@limiter.limit("5/minute")
async def sync_positions_to_accounting(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
data: Dict[str, Any] = Body(...),
context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
"""Submit a background job that pushes positions to the accounting system.
Body: ``{ positionIds: [...] }``
Returns ``{ jobId, status: "pending" }`` immediately so the user is not
blocked while the (potentially long) external accounting calls run.
Clients poll ``GET /api/jobs/{jobId}`` until status is ``SUCCESS`` /
``ERROR`` and then read the same ``{ total, success, skipped, errors,
results }`` payload from ``job.result`` that the legacy synchronous
endpoint returned.
"""
from modules.serviceCenter.services.serviceBackgroundJobs import startJob
mandateId = _validateInstanceAccess(instanceId, context)
positionIds = data.get("positionIds", [])
if not positionIds:
raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required"))
jobId = await startJob(
TRUSTEE_ACCOUNTING_PUSH_JOB_TYPE,
{"positionIds": list(positionIds)},
mandateId=mandateId,
featureInstanceId=instanceId,
triggeredBy=context.user.id if context.user else None,
)
return {"jobId": jobId, "status": "pending"}
@router.post("/{instanceId}/accounting/sync/{positionId}")
@limiter.limit("10/minute")
async def sync_single_position_to_accounting(

View file

@ -98,6 +98,11 @@ _ISO_LABELS: Dict[str, str] = {
"ur": "اردو", "uz": "Oʻzbek", "yo": "Yorùbá", "zu": "isiZulu",
}
# Priority order for the language picker: most relevant first, rest sorted by label.
# Single source of truth -- frontend fetches via GET /api/i18n/iso-choices and must
# never duplicate this list.
_ISO_PRIORITY_CODES: List[str] = ["de", "gsw", "en", "fr", "it"]
# ---------------------------------------------------------------------------
# DB helpers
@ -554,6 +559,38 @@ async def list_language_codes():
return sorted(out, key=lambda x: (not x.get("isDefault"), x["code"]))
@router.get("/iso-choices")
async def list_iso_choices():
"""Return the catalog of supported ISO 639-1/-3 language codes plus their
native labels. Single source of truth for any UI that lets the user pick a
language code (e.g. SysAdmin "add language set" dropdown). The frontend
must NOT keep its own copy of this list.
Response:
{
"priorityCodes": ["de", "gsw", "en", "fr", "it"],
"choices": [{"value": "de", "label": "de — Deutsch"}, ...]
}
"""
choices = [
{"value": code, "label": f"{code}{label}"}
for code, label in _ISO_LABELS.items()
]
def _sortKey(item):
try:
prio = _ISO_PRIORITY_CODES.index(item["value"])
return (0, prio)
except ValueError:
return (1, item["label"].lower())
choices.sort(key=_sortKey)
return {
"priorityCodes": list(_ISO_PRIORITY_CODES),
"choices": choices,
}
@router.get("/sets/{code}")
async def get_language_set(code: str):
db = _publicMgmtDb()

View file

@ -48,7 +48,7 @@ class TestDemoBootstrap:
memberships = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mid})
assert len(memberships) >= 1, f"User not member of mandate {mandate.get('label')}"
@pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"])
@pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "neutralization"])
def test_happylifeFeaturesExist(self, db, mandateHappylife, featureCode):
mid = mandateHappylife.get("id")
instances = _getFeatureInstances(db, mid, featureCode)
@ -66,6 +66,13 @@ class TestDemoBootstrap:
instances = _getFeatureInstances(db, mid, "chatbot")
assert len(instances) == 0, "Alpina Treuhand should not have chatbot"
def test_happylifeNoChatbot(self, db, mandateHappylife):
"""HappyLife also should NOT have a chatbot instance — chatbot was
removed from the InvestorDemo on 2026-04-20 (see changelog)."""
mid = mandateHappylife.get("id")
instances = _getFeatureInstances(db, mid, "chatbot")
assert len(instances) == 0, "HappyLife should no longer have chatbot (removed 2026-04-20)"
class TestDemoBootstrapRma:

View file

@ -1,9 +1,11 @@
"""
T-UC3: Knowledge Chatbot.
Verifies that the chatbot feature instance exists in HappyLife AG
and that knowledge-base documents are available for upload.
Note: The actual RAG demo runs via workspace, not the chatbot's own index.
The chatbot feature instance was removed from the InvestorDemo on
2026-04-20 (see changelog) neither HappyLife nor Alpina bootstrap a
chatbot today; the actual RAG demo runs via workspace. We still verify
the knowledge-base demo files are present and that the bootstrap does
NOT (re)create chatbot instances in either mandate.
"""
import pytest
@ -13,11 +15,11 @@ from tests.demo.conftest import _getFeatureInstances
class TestChatbotSetup:
def test_chatbotInstanceHappylife(self, db, mandateHappylife):
"""HappyLife must have a chatbot instance."""
def test_chatbotNotInHappylife(self, db, mandateHappylife):
"""HappyLife should NOT have a chatbot instance (removed 2026-04-20)."""
mid = mandateHappylife.get("id")
instances = _getFeatureInstances(db, mid, "chatbot")
assert len(instances) >= 1, "No chatbot instance in HappyLife"
assert len(instances) == 0, "HappyLife should no longer bootstrap a chatbot instance"
def test_chatbotNotInAlpina(self, db, mandateAlpina):
"""Alpina should NOT have a chatbot instance."""

View file

@ -0,0 +1,226 @@
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""T6 — PWG-Pilot demo bootstrap & idempotency tests.
Covers AC 11 + AC 12 of the PWG-Pilot plan:
- ``PwgDemo2026.load()`` is idempotent (twice no errors).
- All expected objects exist after load (mandate, demo user,
4 feature instances, trustee seed data, imported pilot workflow with
``active=False``).
- ``remove()`` cleans up cleanly and a subsequent ``load()`` rebuilds
the demo without error (idempotency over the full lifecycle).
Mirrors the structure of ``tests/demo/test_demo_bootstrap.py`` and reuses
its session-scoped ``db`` fixture from ``tests/demo/conftest.py``.
Marked ``expensive + live`` because they hit the real Postgres databases
(``poweron_app``, ``poweron_trustee``, ``poweron_graphicaleditor``); run
them explicitly with::
pytest -m "expensive or live" tests/demo/test_pwg_demo_bootstrap.py
"""
import pytest
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import UserMandate
from modules.datamodels.datamodelUam import Mandate, UserInDB
from tests.demo.conftest import _getFeatureInstances
pytestmark = [pytest.mark.expensive, pytest.mark.live]
# ---------------------------------------------------------------------------
# Fixtures (function-scoped so they always reflect current DB state)
# ---------------------------------------------------------------------------
@pytest.fixture(scope="session")
def pwgDemoConfig():
"""Auto-discovered ``PwgDemo2026`` instance."""
from modules.demoConfigs import _getDemoConfigByCode
cfg = _getDemoConfigByCode("pwg-demo-2026")
assert cfg is not None, (
"Demo config 'pwg-demo-2026' not found — check modules/demoConfigs/pwgDemo2026.py"
)
return cfg
@pytest.fixture
def mandatePwg(db):
records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
assert records, "Mandate 'stiftung-pwg' not found — run pwgDemoConfig.load() first"
return records[0]
@pytest.fixture
def pwgUser(db):
records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
assert records, "User 'pwg.demo' not found — run pwgDemoConfig.load() first"
return records[0]
# ---------------------------------------------------------------------------
# Bootstrap idempotency
# ---------------------------------------------------------------------------
class TestPwgDemoBootstrap:
def test_loadIsIdempotent(self, db, pwgDemoConfig):
"""Loading the PWG demo twice in a row must not raise errors."""
s1 = pwgDemoConfig.load(db)
assert len(s1.get("errors", [])) == 0, f"First load errors: {s1['errors']}"
s2 = pwgDemoConfig.load(db)
assert len(s2.get("errors", [])) == 0, f"Second load errors: {s2['errors']}"
def test_credentialsAreSurfacedFromLoadSummary(self, db, pwgDemoConfig):
s = pwgDemoConfig.load(db)
creds = s.get("credentials") or []
assert any(c.get("username") == "pwg.demo" for c in creds), (
"PWG demo must surface 'pwg.demo' credentials so the SysAdmin "
"doesn't have to grep source code for the password."
)
def test_mandateStiftungPwgExists(self, db):
records = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
assert len(records) == 1
assert records[0].get("label") == "Stiftung PWG"
assert records[0].get("enabled") is True
def test_pwgDemoUserExists(self, db):
records = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
assert len(records) == 1
user = records[0]
assert user.get("email") == "pwg.demo@poweron.swiss"
assert user.get("isSysAdmin") is True
assert user.get("language") == "de"
def test_pwgUserMembership(self, db, pwgUser, mandatePwg):
memberships = db.getRecordset(UserMandate, recordFilter={
"userId": pwgUser.get("id"),
"mandateId": mandatePwg.get("id"),
})
assert len(memberships) >= 1, "PWG demo user not a member of Stiftung PWG"
@pytest.mark.parametrize(
"featureCode",
["workspace", "trustee", "graphicalEditor", "neutralization"],
)
def test_pwgFeaturesExist(self, db, mandatePwg, featureCode):
instances = _getFeatureInstances(db, mandatePwg.get("id"), featureCode)
assert len(instances) >= 1, f"Feature '{featureCode}' missing in Stiftung PWG"
def test_pwgFourFeatureInstances(self, db, mandatePwg):
instances = db.getRecordset(FeatureInstance, recordFilter={
"mandateId": mandatePwg.get("id"),
}) or []
codes = sorted({i.get("featureCode") for i in instances})
assert codes == ["graphicalEditor", "neutralization", "trustee", "workspace"], (
f"Expected exactly 4 feature instances, got {codes}"
)
# ---------------------------------------------------------------------------
# Trustee seed data — 5 fictitious tenants × 12 monthly bookings each
# ---------------------------------------------------------------------------
class TestPwgTrusteeSeed:
def test_trusteeRentAccountExists(self, db, mandatePwg):
from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataAccount
instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
assert instances, "No trustee instance for PWG"
instId = instances[0].get("id")
from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
trusteeDb = _openTrusteeDb()
accounts = trusteeDb.getRecordset(TrusteeDataAccount, recordFilter={
"featureInstanceId": instId,
"accountNumber": "6000",
}) or []
assert len(accounts) == 1, f"Expected exactly 1 rent account 6000, got {len(accounts)}"
assert accounts[0].get("isActive") is True
def test_trusteeFiveTenants(self, db, mandatePwg):
from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataContact
instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
instId = instances[0].get("id")
from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
trusteeDb = _openTrusteeDb()
contacts = trusteeDb.getRecordset(TrusteeDataContact, recordFilter={
"featureInstanceId": instId,
}) or []
# Some installations may already have other trustee contacts, but the
# 5 PWG seed tenants must be present.
names = {c.get("name") for c in contacts}
for expected in (
"Anna Müller", "Beat Schneider", "Carla Weber",
"Daniel Frey", "Eva Lang",
):
assert expected in names, f"PWG seed tenant '{expected}' missing"
def test_trusteeMonthlyBookingsForTenant(self, db, mandatePwg):
"""Every tenant gets 12 monthly journal entries."""
from modules.features.trustee.datamodelFeatureTrustee import TrusteeDataJournalEntry
instances = _getFeatureInstances(db, mandatePwg.get("id"), "trustee")
instId = instances[0].get("id")
from modules.demoConfigs.pwgDemo2026 import _openTrusteeDb
trusteeDb = _openTrusteeDb()
entries = trusteeDb.getRecordset(TrusteeDataJournalEntry, recordFilter={
"featureInstanceId": instId,
}) or []
# 5 tenants × 12 months = 60; >= so reload doesn't false-fail.
pwgEntries = [e for e in entries if (e.get("reference") or "").startswith("PWG-")]
assert len(pwgEntries) >= 60, (
f"Expected >=60 PWG journal entries (5 tenants × 12 months), got {len(pwgEntries)}"
)
# ---------------------------------------------------------------------------
# Pilot workflow — imported envelope, must be active=False
# ---------------------------------------------------------------------------
class TestPwgPilotWorkflow:
def test_pilotWorkflowImported(self, db, mandatePwg):
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
from modules.demoConfigs.pwgDemo2026 import _openGraphicalEditorDb
instances = _getFeatureInstances(db, mandatePwg.get("id"), "graphicalEditor")
assert instances, "No graphicalEditor instance for PWG"
instId = instances[0].get("id")
geDb = _openGraphicalEditorDb()
wfs = geDb.getRecordset(AutoWorkflow, recordFilter={
"mandateId": mandatePwg.get("id"),
"featureInstanceId": instId,
"label": "PWG Pilot: Jahresmietzinsbestätigung",
}) or []
assert len(wfs) == 1, f"Expected exactly 1 PWG pilot workflow, got {len(wfs)}"
wf = wfs[0]
# AC 10: imports must be inactive by default
assert wf.get("active") is False, "PWG pilot workflow must be imported with active=false"
graph = wf.get("graph") or {}
assert (graph.get("nodes") or []), "PWG pilot workflow has no nodes"
# ---------------------------------------------------------------------------
# Lifecycle: remove + reload (mirrors investor demo TestDemoRemoveAndReload)
# ---------------------------------------------------------------------------
class TestPwgRemoveAndReload:
def test_removeAndReload(self, db, pwgDemoConfig):
"""Remove the PWG demo, verify it is gone, then reload it."""
rs = pwgDemoConfig.remove(db)
assert len(rs.get("errors", [])) == 0, f"Remove errors: {rs['errors']}"
mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
assert len(mandates) == 0, "Stiftung PWG mandate should be gone after remove"
users = db.getRecordset(UserInDB, recordFilter={"username": "pwg.demo"})
assert len(users) == 0, "pwg.demo user should be gone after remove"
ls = pwgDemoConfig.load(db)
assert len(ls.get("errors", [])) == 0, f"Reload errors: {ls['errors']}"
mandates = db.getRecordset(Mandate, recordFilter={"name": "stiftung-pwg"})
assert len(mandates) == 1, "Stiftung PWG must exist after reload"

View file

@ -166,7 +166,7 @@ class TestRbacDatabaseFiltering:
try:
mandate = Mandate(
id=testMandateId,
name="RBAC test mandate",
name="rbac-test-mandate-uc",
label="RBAC test",
)
mandatePayload = mandate.model_dump()

View file

@ -1,314 +0,0 @@
"""
Basic verification tests for Phase 1-3 implementation.
Run with: python tests/test_phase123_basic.py
Requires: gateway running on localhost:8000
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print("=" * 60)
print("PHASE 1-3 BASIC VERIFICATION")
print("=" * 60)
errors = []
passes = []
def _check(label, condition, detail=""):
if condition:
passes.append(label)
print(f" [PASS] {label}")
else:
errors.append(f"{label}: {detail}")
print(f" [FAIL] {label}{detail}")
# ── Phase 1: Data Models ──────────────────────────────────────────────────────
print("\n--- Phase 1: Data Models ---")
try:
from modules.datamodels.datamodelUam import Mandate
m = Mandate(name="test", label="test")
_check("Mandate has isSystem field", hasattr(m, "isSystem"))
_check("Mandate isSystem default False", m.isSystem is False)
_check("Mandate no mandateType field", not hasattr(m, "mandateType"))
except Exception as e:
errors.append(f"Phase 1 DataModel: {e}")
print(f" [FAIL] Phase 1 DataModel import: {e}")
try:
from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS, SubscriptionPlan
_check("PENDING status exists", hasattr(SubscriptionStatusEnum, "PENDING"))
_check("BUILTIN_PLANS has TRIAL_14D", "TRIAL_14D" in BUILTIN_PLANS)
trial = BUILTIN_PLANS["TRIAL_14D"]
_check("TRIAL_14D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB"))
_check("TRIAL_14D maxDataVolumeMB=1024", trial.maxDataVolumeMB == 1024)
_check("TRIAL_14D has includedModules", hasattr(trial, "includedModules"))
_check("TRIAL_14D includedModules=2", trial.includedModules == 2)
_check("TRIAL_14D trialDays=14", trial.trialDays == 14)
except Exception as e:
errors.append(f"Phase 1 Subscription: {e}")
print(f" [FAIL] Phase 1 Subscription: {e}")
# ── Phase 2: Scope Fields ─────────────────────────────────────────────────────
print("\n--- Phase 2: Scope Fields on Models ---")
try:
from modules.datamodels.datamodelFiles import FileItem
fi = FileItem(fileName="test.txt", mimeType="text/plain", fileHash="abc", fileSize=100)
_check("FileItem has scope field", hasattr(fi, "scope"))
_check("FileItem scope default=personal", fi.scope == "personal")
_check("FileItem has neutralize field", hasattr(fi, "neutralize"))
_check("FileItem neutralize default=False", fi.neutralize == False)
except Exception as e:
errors.append(f"Phase 2 FileItem: {e}")
print(f" [FAIL] Phase 2 FileItem: {e}")
try:
from modules.datamodels.datamodelDataSource import DataSource
ds = DataSource(connectionId="c1", sourceType="sharepoint", path="/test", label="Test")
_check("DataSource has scope field", hasattr(ds, "scope"))
_check("DataSource scope default=personal", ds.scope == "personal")
_check("DataSource has neutralize field", hasattr(ds, "neutralize"))
_check("DataSource neutralize default=False", ds.neutralize == False)
except Exception as e:
errors.append(f"Phase 2 DataSource: {e}")
print(f" [FAIL] Phase 2 DataSource: {e}")
try:
from modules.datamodels.datamodelKnowledge import FileContentIndex
fci = FileContentIndex(userId="u1", fileName="test.txt", mimeType="text/plain")
_check("FileContentIndex has scope field", hasattr(fci, "scope"))
_check("FileContentIndex scope default=personal", fci.scope == "personal")
_check("FileContentIndex has neutralizationStatus", hasattr(fci, "neutralizationStatus"))
_check("FileContentIndex neutralizationStatus default=None", fci.neutralizationStatus is None)
except Exception as e:
errors.append(f"Phase 2 FileContentIndex: {e}")
print(f" [FAIL] Phase 2 FileContentIndex: {e}")
# ── Phase 2: RAG Scope Filtering ──────────────────────────────────────────────
print("\n--- Phase 2: RAG Scope Logic ---")
try:
from modules.interfaces.interfaceDbKnowledge import KnowledgeObjects
_check("KnowledgeObjects has _getScopedFileIds", hasattr(KnowledgeObjects, "_getScopedFileIds"))
_check("KnowledgeObjects has _buildScopeFilter", hasattr(KnowledgeObjects, "_buildScopeFilter"))
import inspect
sig = inspect.signature(KnowledgeObjects._getScopedFileIds)
params = list(sig.parameters.keys())
_check("_getScopedFileIds has isSysAdmin param", "isSysAdmin" in params)
sig2 = inspect.signature(KnowledgeObjects.semanticSearch)
params2 = list(sig2.parameters.keys())
_check("semanticSearch has scope param", "scope" in params2)
_check("semanticSearch has isSysAdmin param", "isSysAdmin" in params2)
except Exception as e:
errors.append(f"Phase 2 RAG: {e}")
print(f" [FAIL] Phase 2 RAG: {e}")
# ── Phase 3: Neutralization Methods ───────────────────────────────────────────
print("\n--- Phase 3: Neutralization Integration ---")
try:
from modules.workflows.workflowManager import WorkflowManager
_check("WorkflowManager has _neutralizePromptIfRequired", hasattr(WorkflowManager, "_neutralizePromptIfRequired"))
_check("WorkflowManager has _rehydrateResponseIfNeeded", hasattr(WorkflowManager, "_rehydrateResponseIfNeeded"))
import inspect
sig_n = inspect.signature(WorkflowManager._neutralizePromptIfRequired)
_check("_neutralizePromptIfRequired is async", inspect.iscoroutinefunction(WorkflowManager._neutralizePromptIfRequired))
sig_r = inspect.signature(WorkflowManager._rehydrateResponseIfNeeded)
_check("_rehydrateResponseIfNeeded is async", inspect.iscoroutinefunction(WorkflowManager._rehydrateResponseIfNeeded))
except Exception as e:
errors.append(f"Phase 3 WorkflowManager: {e}")
print(f" [FAIL] Phase 3 WorkflowManager: {e}")
# ── Phase 3: Fail-Safe Logic ──────────────────────────────────────────────────
print("\n--- Phase 3: Fail-Safe Logic ---")
try:
import ast
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "workflows", "methods", "methodContext", "actions", "neutralizeData.py"), "r") as f:
source = f.read()
_check("neutralizeData.py has 'SKIPPING' fail-safe", "SKIPPING" in source)
_check("neutralizeData.py has 'do NOT pass original' comment", "do NOT pass original" in source.lower() or "not passing original" in source.lower())
_check("neutralizeData.py uses continue for skip", "continue" in source)
except Exception as e:
errors.append(f"Phase 3 Fail-Safe: {e}")
print(f" [FAIL] Phase 3 Fail-Safe: {e}")
# ── Phase 2: Route Endpoints ──────────────────────────────────────────────────
print("\n--- Phase 2: API Endpoints ---")
try:
import ast
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "routes", "routeDataFiles.py"), "r") as f:
source = f.read()
_check("routeDataFiles has PATCH scope endpoint", "updateFileScope" in source)
_check("routeDataFiles has PATCH neutralize endpoint", "updateFileNeutralize" in source)
_check("routeDataFiles checks global sysAdmin", "isSysAdmin" in source)
except Exception as e:
errors.append(f"Phase 2 Routes: {e}")
print(f" [FAIL] Phase 2 Routes: {e}")
# ── Phase 1: Store Endpoints ──────────────────────────────────────────────────
print("\n--- Phase 1: Store Endpoints ---")
try:
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "routes", "routeStore.py"), "r") as f:
source = f.read()
_check("routeStore has listUserMandates", "listUserMandates" in source or "list_user_mandates" in source)
_check("routeStore has getSubscriptionInfo", "getSubscriptionInfo" in source or "get_subscription_info" in source)
_check("routeStore has orphan control", "orphan" in source.lower() or "last" in source.lower())
except Exception as e:
errors.append(f"Phase 1 Store: {e}")
print(f" [FAIL] Phase 1 Store: {e}")
# ── Phase 1: Provisioning ─────────────────────────────────────────────────────
print("\n--- Phase 1: Provisioning ---")
try:
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "interfaces", "interfaceDbApp.py"), "r") as f:
source = f.read()
_check("interfaceDbApp has _provisionMandateForUser", "_provisionMandateForUser" in source)
_check("interfaceDbApp has _activatePendingSubscriptions", "_activatePendingSubscriptions" in source)
_check("interfaceDbApp has deleteMandate cascade", "deleteMandate" in source and "cascade" in source.lower())
except Exception as e:
errors.append(f"Phase 1 Provisioning: {e}")
print(f" [FAIL] Phase 1 Provisioning: {e}")
# ── Phase 1: Registration Routes ──────────────────────────────────────────────
print("\n--- Phase 1: Registration ---")
try:
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "routes", "routeSecurityLocal.py"), "r") as f:
source = f.read()
_check("routeSecurityLocal has registrationType", "registrationType" in source)
_check("routeSecurityLocal has companyName", "companyName" in source)
_check("routeSecurityLocal has onboarding endpoint", "onboarding" in source)
except Exception as e:
errors.append(f"Phase 1 Registration: {e}")
print(f" [FAIL] Phase 1 Registration: {e}")
# ── Fix 1: OnboardingWizard Integration ────────────────────────────────────────
print("\n--- Fix 1: OnboardingWizard Integration ---")
try:
loginPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"..", "frontend_nyla", "src", "pages", "Login.tsx")
with open(loginPath, "r", encoding="utf-8") as f:
source = f.read()
_check("Login.tsx imports OnboardingWizard", "OnboardingWizard" in source)
_check("Login.tsx has showOnboardingWizard state", "showOnboardingWizard" in source)
_check("Login.tsx checks isNewUser", "isNewUser" in source)
except Exception as e:
errors.append(f"Fix 1: {e}")
print(f" [FAIL] Fix 1: {e}")
# ── Fix 2: CommCoach UDB Integration ──────────────────────────────────────────
print("\n--- Fix 2: CommCoach UDB Integration ---")
try:
dossierPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"..", "frontend_nyla", "src", "pages", "views", "commcoach", "CommcoachDossierView.tsx")
with open(dossierPath, "r", encoding="utf-8") as f:
source = f.read()
_check("CommCoach imports UnifiedDataBar", "UnifiedDataBar" in source)
_check("CommCoach imports FilesTab", "FilesTab" in source)
_check("CommCoach no longer imports getDocumentsApi", "getDocumentsApi" not in source)
_check("CommCoach has UDB sidebar", "udbSidebar" in source or "UnifiedDataBar" in source)
except Exception as e:
errors.append(f"Fix 2: {e}")
print(f" [FAIL] Fix 2: {e}")
# ── Fix 3: Neutralization Backend Endpoints ───────────────────────────────────
print("\n--- Fix 3: Neutralization Backend Endpoints ---")
try:
routePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "features", "neutralization", "routeFeatureNeutralizer.py")
with open(routePath, "r") as f:
source = f.read()
_check("Neutralization has deleteAttribute endpoint", "deleteAttribute" in source or "delete_attribute" in source)
_check("Neutralization has retrigger endpoint", "retrigger" in source)
_check("Neutralization has single attribute delete", "single" in source or "attributeId" in source)
except Exception as e:
errors.append(f"Fix 3: {e}")
print(f" [FAIL] Fix 3: {e}")
# ── Fix 4: Central AI Neutralization ──────────────────────────────────────────
print("\n--- Fix 4: Central AI Neutralization ---")
try:
aiPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "serviceCenter", "services", "serviceAi", "mainServiceAi.py")
with open(aiPath, "r") as f:
source = f.read()
_check("AiService has _shouldNeutralize", "_shouldNeutralize" in source)
_check("AiService has _neutralizeRequest", "_neutralizeRequest" in source)
_check("AiService has _rehydrateResponse", "_rehydrateResponse" in source)
_check("callAi uses neutralization", "_shouldNeutralize" in source and "_neutralizeRequest" in source)
except Exception as e:
errors.append(f"Fix 4: {e}")
print(f" [FAIL] Fix 4: {e}")
# ── Fix 5: Voice Settings User Level ──────────────────────────────────────────
print("\n--- Fix 5: Voice Settings User Level ---")
try:
from modules.datamodels.datamodelUam import UserVoicePreferences
uvp = UserVoicePreferences(userId="u1")
_check("UserVoicePreferences model exists", True)
_check("UserVoicePreferences has sttLanguage", hasattr(uvp, "sttLanguage"))
_check("UserVoicePreferences default sttLanguage=de-DE", uvp.sttLanguage == "de-DE")
_check("UserVoicePreferences has ttsVoice", hasattr(uvp, "ttsVoice"))
except Exception as e:
errors.append(f"Fix 5: {e}")
print(f" [FAIL] Fix 5: {e}")
try:
voiceUserPath = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "routes", "routeVoiceUser.py",
)
with open(voiceUserPath, "r") as f:
source = f.read()
_check("Voice preferences GET endpoint", '"/preferences"' in source and "getVoicePreferences" in source)
_check("Voice preferences PUT endpoint", "updateVoicePreferences" in source)
except Exception as e:
errors.append(f"Fix 5 Routes: {e}")
print(f" [FAIL] Fix 5 Routes: {e}")
# ── Fix 6: RAG mandate-wide scope ─────────────────────────────────────────────
print("\n--- Fix 6: RAG mandate-wide scope ---")
try:
knowledgePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"modules", "serviceCenter", "services", "serviceKnowledge", "mainServiceKnowledge.py")
with open(knowledgePath, "r") as f:
source = f.read()
_check("buildAgentContext passes mandateId to semanticSearch", "mandateId=mandateId" in source)
_check("buildAgentContext has isSysAdmin param", "isSysAdmin" in source)
except Exception as e:
errors.append(f"Fix 6: {e}")
print(f" [FAIL] Fix 6: {e}")
# ── Summary ───────────────────────────────────────────────────────────────────
print("\n" + "=" * 60)
print(f"RESULTS: {len(passes)} passed, {len(errors)} failed")
print("=" * 60)
if errors:
print("\nFAILURES:")
for e in errors:
print(f" - {e}")
sys.exit(1)
else:
print("\nALL CHECKS PASSED!")
sys.exit(0)

View file

@ -112,6 +112,8 @@ class TestAggregateEndToEnd:
dateTo="2026-04-30",
bucket="month",
trackerIdsFilter=[],
categoryIdsFilter=[],
statusFilter="",
instanceId="test-instance",
)
assert dto.instanceId == "test-instance"

View file

@ -0,0 +1,383 @@
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""T3 — Unit tests for the workflow-CRUD agent tools.
Covers AC 5 + AC 6 of the PWG-Pilot plan:
- createWorkflow happy-path returns a workflowId.
- createWorkflow rejects missing label / instanceId.
- deleteWorkflow without ``confirm=true`` is a NO-OP and returns an error.
- deleteWorkflow with ``confirm=true`` deletes and returns success.
- updateWorkflowMetadata patches only the supplied fields.
- createWorkflowFromFile / exportWorkflowToFile happy-path round-trip.
The tools call into a feature-instance interface; we replace
``workflowTools._getInterface`` with a fake that captures interactions
without touching any database.
"""
import asyncio
import json
import uuid
from typing import Any, Dict, Optional
import pytest
from modules.serviceCenter.services.serviceAgent import workflowTools
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FakeInterface:
"""In-memory stand-in for ``GraphicalEditorObjects``.
Stores workflows by id and records every method call in ``self.calls``
so tests can assert on the parameters the tool layer forwarded.
"""
def __init__(self, mandateId: str = "mand-1", featureInstanceId: str = "inst-1"):
self.mandateId = mandateId
self.featureInstanceId = featureInstanceId
self.workflows: Dict[str, Dict[str, Any]] = {}
self.calls: list = []
def createWorkflow(self, data: Dict[str, Any]) -> Dict[str, Any]:
self.calls.append(("createWorkflow", data))
wfId = data.get("id") or str(uuid.uuid4())
record = dict(data)
record["id"] = wfId
record["mandateId"] = self.mandateId
record["featureInstanceId"] = self.featureInstanceId
record.setdefault("active", False)
self.workflows[wfId] = record
return record
def updateWorkflow(self, workflowId: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
self.calls.append(("updateWorkflow", workflowId, data))
existing = self.workflows.get(workflowId)
if not existing:
return None
existing.update(data)
return existing
def deleteWorkflow(self, workflowId: str) -> bool:
self.calls.append(("deleteWorkflow", workflowId))
return self.workflows.pop(workflowId, None) is not None
def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]:
return self.workflows.get(workflowId)
def importWorkflowFromDict(
self,
envelope: Dict[str, Any],
existingWorkflowId: Optional[str] = None,
) -> Dict[str, Any]:
self.calls.append(("importWorkflowFromDict", envelope, existingWorkflowId))
data = {
"label": envelope.get("label", "Imported"),
"description": envelope.get("description", ""),
"tags": envelope.get("tags", []),
"graph": envelope.get("graph", {"nodes": [], "connections": []}),
"invocations": envelope.get("invocations", []),
"active": False,
}
if existingWorkflowId:
updated = self.updateWorkflow(existingWorkflowId, data) or {}
return {"workflow": updated, "warnings": [], "created": False}
created = self.createWorkflow(data)
return {"workflow": created, "warnings": [], "created": True}
def exportWorkflowToDict(self, workflowId: str) -> Optional[Dict[str, Any]]:
wf = self.workflows.get(workflowId)
if not wf:
return None
return {
"$schemaVersion": "1.0",
"$kind": "poweron.workflow",
"label": wf.get("label"),
"description": wf.get("description", ""),
"tags": wf.get("tags", []),
"graph": wf.get("graph") or {"nodes": [], "connections": []},
"invocations": wf.get("invocations") or [],
}
@pytest.fixture
def fakeInterface(monkeypatch):
"""Replace ``_getInterface`` with a fixture-scoped fake."""
fake = _FakeInterface()
monkeypatch.setattr(workflowTools, "_getInterface", lambda _ctx, _iid: fake)
return fake
def _ctx(workflowId: str = "wf-1", instanceId: str = "inst-1") -> Dict[str, Any]:
"""Standard agent-tool context dict."""
return {
"workflowId": workflowId,
"featureInstanceId": instanceId,
"userId": "user-1",
"mandateId": "mand-1",
}
def _runTool(handler, params: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
return asyncio.run(handler(params, context))
def _payload(result: ToolResult) -> Dict[str, Any]:
"""Decode the tool's data string back into a dict for easy asserts."""
assert isinstance(result.data, str), "ToolResult.data must be a string per registry contract"
return json.loads(result.data)
# ---------------------------------------------------------------------------
# createWorkflow — AC 5
# ---------------------------------------------------------------------------
class TestCreateWorkflow:
def test_happyPathReturnsWorkflowId(self, fakeInterface):
result = _runTool(workflowTools._createWorkflow, {"label": "Smoke-Test"}, _ctx())
assert result.success, result.error
payload = _payload(result)
assert payload["workflowId"]
assert payload["label"] == "Smoke-Test"
assert payload["workflowId"] in fakeInterface.workflows
assert fakeInterface.workflows[payload["workflowId"]]["active"] is False
def test_missingLabelIsRejected(self, fakeInterface):
result = _runTool(workflowTools._createWorkflow, {}, _ctx())
assert not result.success
assert "label" in (result.error or "").lower()
assert fakeInterface.calls == [], "no DB call must happen on validation error"
def test_missingInstanceIdIsRejected(self, fakeInterface):
ctx = {"workflowId": "wf-1", "userId": "user-1", "mandateId": "mand-1"}
result = _runTool(workflowTools._createWorkflow, {"label": "Empty"}, ctx)
assert not result.success
assert "instanceid" in (result.error or "").lower()
def test_blankLabelIsRejected(self, fakeInterface):
result = _runTool(workflowTools._createWorkflow, {"label": " "}, _ctx())
assert not result.success
def test_initialGraphAndTagsAreForwarded(self, fakeInterface):
graph = {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []}
result = _runTool(
workflowTools._createWorkflow,
{"label": "With Graph", "tags": ["pwg"], "graph": graph, "description": "d"},
_ctx(),
)
assert result.success
wfId = _payload(result)["workflowId"]
stored = fakeInterface.workflows[wfId]
assert stored["tags"] == ["pwg"]
assert stored["description"] == "d"
assert stored["graph"]["nodes"][0]["id"] == "n1"
# ---------------------------------------------------------------------------
# deleteWorkflow — AC 6
# ---------------------------------------------------------------------------
class TestDeleteWorkflow:
def test_withoutConfirmReturnsError(self, fakeInterface):
fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
result = _runTool(workflowTools._deleteWorkflow, {"workflowId": "wf-x"}, _ctx())
assert not result.success
assert "confirm" in (result.error or "").lower()
# Critical: no destructive call must reach the interface
assert all(call[0] != "deleteWorkflow" for call in fakeInterface.calls)
assert "wf-x" in fakeInterface.workflows
def test_withConfirmFalseAlsoBlocks(self, fakeInterface):
fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
result = _runTool(
workflowTools._deleteWorkflow,
{"workflowId": "wf-x", "confirm": False},
_ctx(),
)
assert not result.success
assert "wf-x" in fakeInterface.workflows
def test_withConfirmTrueDeletes(self, fakeInterface):
fakeInterface.workflows["wf-x"] = {"id": "wf-x", "label": "L"}
result = _runTool(
workflowTools._deleteWorkflow,
{"workflowId": "wf-x", "confirm": True},
_ctx(),
)
assert result.success, result.error
assert "wf-x" not in fakeInterface.workflows
def test_unknownWorkflowReturnsError(self, fakeInterface):
result = _runTool(
workflowTools._deleteWorkflow,
{"workflowId": "wf-ghost", "confirm": True},
_ctx(),
)
assert not result.success
assert "not found" in (result.error or "").lower()
def test_missingIdsReturnError(self, fakeInterface):
result = _runTool(
workflowTools._deleteWorkflow,
{"confirm": True},
{"userId": "user-1", "mandateId": "mand-1"},
)
assert not result.success
assert "required" in (result.error or "").lower()
# ---------------------------------------------------------------------------
# updateWorkflowMetadata — supports the "rename" intent without touching graph
# ---------------------------------------------------------------------------
class TestUpdateWorkflowMetadata:
def test_renameOnlyTouchesLabel(self, fakeInterface):
fakeInterface.workflows["wf-1"] = {
"id": "wf-1",
"label": "Old Name",
"graph": {"nodes": [{"id": "n1"}], "connections": []},
}
result = _runTool(
workflowTools._updateWorkflowMetadata,
{"workflowId": "wf-1", "label": "New Name"},
_ctx(),
)
assert result.success, result.error
payload = _payload(result)
assert payload["label"] == "New Name"
assert payload["changed"] == ["label"]
# Graph must remain untouched
stored = fakeInterface.workflows["wf-1"]
assert stored["graph"]["nodes"][0]["id"] == "n1"
def test_emptyPatchIsRejected(self, fakeInterface):
fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"}
result = _runTool(
workflowTools._updateWorkflowMetadata,
{"workflowId": "wf-1"},
_ctx(),
)
assert not result.success
def test_blankLabelIsRejected(self, fakeInterface):
fakeInterface.workflows["wf-1"] = {"id": "wf-1", "label": "L"}
result = _runTool(
workflowTools._updateWorkflowMetadata,
{"workflowId": "wf-1", "label": " "},
_ctx(),
)
assert not result.success
# ---------------------------------------------------------------------------
# createWorkflowFromFile / exportWorkflowToFile — round-trip via the tool layer
# ---------------------------------------------------------------------------
class TestImportExportTools:
def test_inlineEnvelopeImportCreatesWorkflow(self, fakeInterface):
envelope = {
"$schemaVersion": "1.0",
"label": "Imported PWG",
"graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []},
}
result = _runTool(
workflowTools._createWorkflowFromFile,
{"envelope": envelope},
_ctx(),
)
assert result.success, result.error
payload = _payload(result)
assert payload["workflowId"]
assert payload["created"] is True
assert payload["label"] == "Imported PWG"
assert fakeInterface.workflows[payload["workflowId"]]["active"] is False
def test_importRequiresFileIdOrEnvelope(self, fakeInterface):
result = _runTool(
workflowTools._createWorkflowFromFile,
{},
_ctx(),
)
assert not result.success
assert "fileid" in (result.error or "").lower() or "envelope" in (result.error or "").lower()
def test_existingWorkflowIdReplacesGraph(self, fakeInterface):
fakeInterface.workflows["wf-1"] = {
"id": "wf-1",
"label": "Existing",
"graph": {"nodes": [], "connections": []},
}
envelope = {
"$schemaVersion": "1.0",
"label": "Replaced",
"graph": {"nodes": [{"id": "n2", "type": "trigger.manual"}], "connections": []},
}
result = _runTool(
workflowTools._createWorkflowFromFile,
{"envelope": envelope, "existingWorkflowId": "wf-1"},
_ctx(),
)
assert result.success, result.error
payload = _payload(result)
assert payload["created"] is False
assert fakeInterface.workflows["wf-1"]["graph"]["nodes"][0]["id"] == "n2"
def test_exportProducesEnvelopeWithSchemaVersion(self, fakeInterface):
fakeInterface.workflows["wf-1"] = {
"id": "wf-1",
"label": "Round-Trip",
"graph": {"nodes": [{"id": "n1", "type": "trigger.manual"}], "connections": []},
}
result = _runTool(
workflowTools._exportWorkflowToFile,
{"workflowId": "wf-1"},
_ctx(),
)
assert result.success, result.error
payload = _payload(result)
assert payload["fileName"].endswith(".workflow.json")
assert payload["schemaVersion"] == "1.0"
envelope = payload["envelope"]
assert envelope["label"] == "Round-Trip"
assert envelope["$kind"] == "poweron.workflow"
def test_exportUnknownWorkflowReturnsError(self, fakeInterface):
result = _runTool(
workflowTools._exportWorkflowToFile,
{"workflowId": "wf-ghost"},
_ctx(),
)
assert not result.success
assert "not found" in (result.error or "").lower()
# ---------------------------------------------------------------------------
# Tool definitions — make sure the new tools are registered with the toolbox
# (cheap regression test that a refactor doesn't drop one of them silently)
# ---------------------------------------------------------------------------
class TestToolDefinitions:
def test_allCrudToolsAreRegistered(self):
defs = workflowTools.getWorkflowToolDefinitions()
names = {d["name"] for d in defs}
for required in (
"createWorkflow",
"createWorkflowFromFile",
"exportWorkflowToFile",
"deleteWorkflow",
"updateWorkflowMetadata",
):
assert required in names, f"{required} missing from workflow toolbox"
def test_deleteWorkflowMarksConfirmRequired(self):
defs = {d["name"]: d for d in workflowTools.getWorkflowToolDefinitions()}
deleteSpec = defs["deleteWorkflow"]
params = deleteSpec.get("parameters", {})
assert "confirm" in (params.get("required") or []), (
"deleteWorkflow must declare confirm as required so the model "
"cannot accidentally call it without an explicit confirmation."
)

View file

@ -3,6 +3,14 @@
# All rights reserved.
"""
Test script for JSON extraction response detection and merging.
The methods under test (``_isJsonExtractionResponse``,
``_mergeJsonExtractionResponses``, etc.) are pure data-manipulation and
do NOT touch ``self._context`` / ``self._get_service`` / the DB. We
therefore bypass ``ExtractionService.__init__`` (which would require a
live ``ServiceCenterContext`` + service-resolver) by instantiating with
``__new__`` same as constructing a stub without dependency wiring.
Run: python gateway/tests/unit/services/test_json_extraction_merging.py
"""
@ -20,7 +28,7 @@ from modules.serviceCenter.services.serviceExtraction.mainServiceExtraction impo
def test_detects_json_with_code_fences():
"""Test that JSON extraction responses with markdown code fences are detected"""
print("Test 1: Detecting JSON with code fences...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test1",
@ -38,7 +46,7 @@ def test_detects_json_with_code_fences():
def test_detects_json_without_code_fences():
"""Test that JSON extraction responses without code fences are detected"""
print("Test 2: Detecting JSON without code fences...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test2",
@ -56,7 +64,7 @@ def test_detects_json_without_code_fences():
def test_rejects_non_extraction_json():
"""Test that regular JSON (without extracted_content) is rejected"""
print("Test 3: Rejecting non-extraction JSON...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test3",
@ -74,7 +82,7 @@ def test_rejects_non_extraction_json():
def test_rejects_non_json_content():
"""Test that non-JSON content is rejected"""
print("Test 4: Rejecting non-JSON content...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
content_part = ContentPart(
id="test4",
@ -92,7 +100,7 @@ def test_rejects_non_json_content():
def test_merges_tables_with_same_headers():
"""Test that tables with identical headers are merged"""
print("Test 5: Merging tables with same headers...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@ -116,18 +124,22 @@ def test_merges_tables_with_same_headers():
assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}"
table = merged["extracted_content"]["tables"][0]
assert table["headers"] == ["Name", "Amount"], f"Headers should match, got {table['headers']}"
# Should have 3 unique rows (Alice appears twice but should be deduplicated)
assert len(table["rows"]) == 3, f"Should have 3 unique rows, got {len(table['rows'])}"
# Per the documented merge contract ("Tables: Combines all table rows,
# ... duplicates preserved" — see _mergeJsonExtractionResponses
# docstring), identical rows from different parts are NOT deduplicated.
# Alice appears in both parts, so the merged table has 4 rows.
assert len(table["rows"]) == 4, f"Should have 4 rows (duplicates preserved), got {len(table['rows'])}"
assert ["Alice", "100"] in table["rows"], "Alice row should be present"
assert ["Bob", "200"] in table["rows"], "Bob row should be present"
assert ["Charlie", "300"] in table["rows"], "Charlie row should be present"
assert table["rows"].count(["Alice", "100"]) == 2, "Alice row must be preserved twice (no dedup)"
print(" [PASS]")
def test_merges_multiple_json_blocks_separated_by_dash():
"""Test that multiple JSON blocks separated by --- are merged"""
print("Test 6: Merging multiple JSON blocks separated by ---...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
# Create content part with multiple JSON blocks separated by ---
part1 = ContentPart(
@ -153,7 +165,7 @@ def test_merges_multiple_json_blocks_separated_by_dash():
def test_merges_text_content():
"""Test that text content from multiple parts is merged"""
print("Test 7: Merging text content...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@ -183,7 +195,7 @@ def test_merges_text_content():
def test_merges_headings_and_lists():
"""Test that headings and lists are merged"""
print("Test 8: Merging headings and lists...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@ -218,7 +230,7 @@ def test_merges_headings_and_lists():
def test_handles_empty_content_parts():
"""Test that empty content parts are handled gracefully"""
print("Test 9: Handling empty content parts...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@ -246,7 +258,7 @@ def test_handles_empty_content_parts():
def test_merges_tables_with_different_headers():
"""Test that tables with different headers are kept separate"""
print("Test 10: Keeping tables with different headers separate...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
part1 = ContentPart(
id="test1",
@ -284,7 +296,7 @@ def test_merges_tables_with_different_headers():
def test_real_world_scenario():
"""Test with a realistic scenario similar to the debug file"""
print("Test 11: Real-world scenario (multiple documents, multiple JSON blocks)...")
service = ExtractionService(None)
service = ExtractionService.__new__(ExtractionService)
# Simulate 3 documents, each with a table extraction response
part1 = ContentPart(
@ -314,24 +326,22 @@ def test_real_world_scenario():
merged = service._mergeJsonExtractionResponses([part1, part2, part3])
# Should have one merged table with all unique transactions
# Should have one merged table with all transactions
assert len(merged["extracted_content"]["tables"]) == 1, f"Should have one merged table, got {len(merged['extracted_content']['tables'])}"
table = merged["extracted_content"]["tables"][0]
assert table["headers"] == ["Transaction ID", "Date", "Amount"], "Headers should match"
# Should have 5 unique rows (TXN001 appears twice but should be deduplicated)
assert len(table["rows"]) == 5, f"Should have 5 unique rows, got {len(table['rows'])}"
# Per the documented merge contract, duplicate rows are preserved.
# TXN001 occurs in both doc1 and doc2 -> 6 rows total.
assert len(table["rows"]) == 6, f"Should have 6 rows (duplicates preserved), got {len(table['rows'])}"
# Verify all transactions are present
transaction_ids = [row[0] for row in table["rows"]]
assert "TXN001" in transaction_ids, "TXN001 should be present"
assert "TXN002" in transaction_ids, "TXN002 should be present"
assert "TXN003" in transaction_ids, "TXN003 should be present"
assert "TXN004" in transaction_ids, "TXN004 should be present"
assert "TXN005" in transaction_ids, "TXN005 should be present"
for txn in ("TXN001", "TXN002", "TXN003", "TXN004", "TXN005"):
assert txn in transaction_ids, f"{txn} should be present"
# Verify TXN001 appears only once (deduplicated)
assert transaction_ids.count("TXN001") == 1, "TXN001 should appear only once (deduplicated)"
# TXN001 must appear twice (no dedup at merge time — dedup is the
# responsibility of downstream consumers if needed).
assert transaction_ids.count("TXN001") == 2, "TXN001 must appear twice (duplicates preserved)"
print(" [PASS]")

View file

@ -34,9 +34,14 @@ class TestResolveParameterReferences:
assert resolveParameterReferences(value, node_outputs) == "b"
def test_ref_missing_node(self):
# Current runtime semantics: an unresolved ref (nodeId not in
# node_outputs) collapses to None rather than the original
# placeholder dict. The workflow engine relies on this — downstream
# nodes treat missing refs as "no value yet" rather than "literal
# placeholder" — so we lock the contract here.
node_outputs = {}
value = {"type": "ref", "nodeId": "missing", "path": ["x"]}
assert resolveParameterReferences(value, node_outputs) == value
assert resolveParameterReferences(value, node_outputs) is None
def test_value_wrapper(self):
value = {"type": "value", "value": "static text"}