plana+c implemented

This commit is contained in:
ValueOn AG 2026-04-29 21:27:08 +02:00
parent 052647a52b
commit 880fa4d787
24 changed files with 977 additions and 615 deletions

3
app.py
View file

@ -672,6 +672,9 @@ app.include_router(navigationRouter)
from modules.routes.routeWorkflowDashboard import router as workflowDashboardRouter
app.include_router(workflowDashboardRouter)
from modules.routes.routeAutomationWorkspace import router as automationWorkspaceRouter
app.include_router(automationWorkspaceRouter)
# ============================================================================
# PLUG&PLAY FEATURE ROUTERS
# Dynamically load routers from feature containers in modules/features/

View file

@ -72,7 +72,7 @@ class AutoWorkflow(PowerOnModel):
},
)
featureInstanceId: str = Field(
description="Feature instance ID",
description="Feature instance ID (GE owner instance / RBAC scope)",
json_schema_extra={
"frontend_type": "text",
"frontend_readonly": True,
@ -81,6 +81,17 @@ class AutoWorkflow(PowerOnModel):
"fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"},
},
)
targetFeatureInstanceId: Optional[str] = Field(
default=None,
description="Target feature instance for execution data scope. NULL for templates, mandatory for non-templates.",
json_schema_extra={
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": False,
"label": "Ziel-Instanz",
"fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"},
},
)
label: str = Field(
description="User-friendly workflow name",
json_schema_extra={"frontend_type": "text", "frontend_required": True, "label": "Bezeichnung"},

View file

@ -217,6 +217,8 @@ class GraphicalEditorObjects:
data["id"] = str(uuid.uuid4())
data["mandateId"] = self.mandateId
data["featureInstanceId"] = self.featureInstanceId
if not data.get("targetFeatureInstanceId") and not data.get("isTemplate"):
data["targetFeatureInstanceId"] = self.featureInstanceId
if "active" not in data or data.get("active") is None:
data["active"] = True
data["invocations"] = normalize_invocations_list(data.get("invocations"))

View file

@ -10,7 +10,7 @@ AI_NODES = [
"label": t("Prompt"),
"description": t("Prompt eingeben und KI führt aus"),
"parameters": [
{"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
{"name": "aiPrompt", "type": "string", "required": True, "frontendType": "templateTextarea",
"description": t("KI-Prompt")},
{"name": "resultType", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["txt", "json", "md", "csv", "xml", "html", "pdf", "docx", "xlsx", "pptx", "png", "jpg"]},

View file

@ -62,7 +62,7 @@ EMAIL_NODES = [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"},
"description": t("E-Mail-Konto")},
{"name": "context", "type": "string", "required": False, "frontendType": "textarea",
{"name": "context", "type": "string", "required": False, "frontendType": "templateTextarea",
"description": t("Kontext / Brief-Beschreibung für die KI-Komposition"), "default": ""},
{"name": "to", "type": "string", "required": False, "frontendType": "text",
"description": t("Empfänger (komma-separiert, optional für Entwurf)"), "default": ""},

View file

@ -111,6 +111,44 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
return str(instance.mandateId) if instance.mandateId else ""
def _validateTargetInstance(
    workflowData: Dict[str, Any],
    ownerInstanceId: str,
    context: RequestContext,
) -> None:
    """Enforce targetFeatureInstanceId rules for non-template workflows.

    - Templates (isTemplate=True) may omit targetFeatureInstanceId.
    - A target equal to the GE owner instance needs no extra check (access
      to the owner instance was already validated by the caller).
    - If the targetFeatureInstanceId differs from the GE owner instance,
      the user must also have enabled FeatureAccess on that target instance.

    NOTE(review): a non-template with an EMPTY targetFeatureInstanceId passes
    validation here (silent return), even though the field is described as
    mandatory for non-templates -- presumably it is auto-filled with the
    owner instance further down the create path; confirm before tightening
    this into a 400.

    Raises:
        HTTPException: 400 when the target instance does not exist,
            403 when the user lacks enabled FeatureAccess on it.
    """
    if workflowData.get("isTemplate"):
        return
    targetId = workflowData.get("targetFeatureInstanceId")
    if not targetId:
        return
    if targetId == ownerInstanceId:
        return
    # Deferred import -- presumably to avoid an import cycle at module load.
    from modules.interfaces.interfaceDbApp import getRootInterface
    rootInterface = getRootInterface()
    targetInstance = rootInterface.getFeatureInstance(targetId)
    if not targetInstance:
        raise HTTPException(
            status_code=400,
            detail=routeApiMsg("targetFeatureInstanceId refers to a non-existent feature instance"),
        )
    targetAccess = rootInterface.getFeatureAccess(str(context.user.id), targetId)
    if not targetAccess or not targetAccess.enabled:
        raise HTTPException(
            status_code=403,
            detail=routeApiMsg("Access denied to target feature instance"),
        )
@router.get("/{instanceId}/node-types")
@limiter.limit("60/minute")
def get_node_types(
@ -318,9 +356,12 @@ async def post_execute(
workflowId = body.get("workflowId")
req_nodes = graph.get("nodes") or []
workflow_for_envelope: Optional[Dict[str, Any]] = None
targetFeatureInstanceId: Optional[str] = None
if workflowId and not str(workflowId).startswith("transient-"):
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
workflow_for_envelope = iface.getWorkflow(workflowId)
if workflow_for_envelope:
targetFeatureInstanceId = workflow_for_envelope.get("targetFeatureInstanceId")
if workflowId and len(req_nodes) == 0:
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
@ -328,10 +369,18 @@ async def post_execute(
graph = wf["graph"]
logger.info("graphicalEditor execute: loaded graph from workflow %s", workflowId)
workflow_for_envelope = wf
targetFeatureInstanceId = wf.get("targetFeatureInstanceId")
if not workflowId:
import uuid
workflowId = f"transient-{uuid.uuid4().hex[:12]}"
logger.info("graphicalEditor execute: using transient workflowId=%s", workflowId)
if targetFeatureInstanceId and targetFeatureInstanceId != instanceId:
_validateTargetInstance(
{"targetFeatureInstanceId": targetFeatureInstanceId},
instanceId,
context,
)
nodes_count = len(graph.get("nodes") or [])
connections_count = len(graph.get("connections") or [])
logger.info(
@ -363,6 +412,7 @@ async def post_execute(
automation2_interface=ge_interface,
run_envelope=run_env,
label=_wfLabel,
targetFeatureInstanceId=targetFeatureInstanceId,
)
logger.info(
"graphicalEditor execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",
@ -1371,6 +1421,7 @@ def create_workflow(
) -> dict:
"""Create a new workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
_validateTargetInstance(body, instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
created = iface.createWorkflow(body)
return created
@ -1388,6 +1439,11 @@ def update_workflow(
"""Update a workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
existing = iface.getWorkflow(workflowId)
if not existing:
raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
merged = {**existing, **body}
_validateTargetInstance(merged, instanceId, context)
updated = iface.updateWorkflow(workflowId, body)
if not updated:
raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))

View file

@ -0,0 +1,198 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Lightweight Bootstrap-Telemetrie fuer entfernte Migrationsroutinen.
Wenn eine idempotente Bootstrap-Migration (z.B. ``_migrateAndDropSysAdminRole``)
aus dem Boot-Pfad entfernt wird, koennte ein theoretischer Edge-Case (alte
DB-Restore, manueller INSERT) wieder Legacy-Daten ins System bringen. Damit das
nicht still bleibt, ruft ``initBootstrap`` nach Abschluss aller Init-Schritte
einmalig ``runLegacyDataChecks`` auf -- das logged WARN bei Restbestand.
Designprinzipien:
- KEINE Schreibzugriffe (rein lesend).
- Process-lokal gecached (``_cache``), damit identische Boots/Reloads den Check
nur einmal laufen lassen.
- Pro Check eine Recordset-Abfrage; Ausnahmen werden als WARN geloggt, nicht
re-raised, damit Telemetrie den Boot nie crasht.
"""
from __future__ import annotations
import logging
from typing import Any
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.datamodels.datamodelRbac import Role
from modules.datamodels.datamodelUam import Mandate
from modules.shared.mandateNameUtils import isValidMandateName
logger = logging.getLogger(__name__)
_alreadyRan: bool = False
def runLegacyDataChecks(db: DatabaseConnector) -> None:
    """Run the one-time legacy-data telemetry sweep.

    Logs WARN when residue of removed migration routines is found. Guarded
    by a process-wide flag so the sweep is active only once per process.
    Called at the end of ``initBootstrap``.
    """
    global _alreadyRan
    if _alreadyRan:
        return
    _alreadyRan = True
    for check in (
        _checkMandateDescription,
        _checkMandateSlugRules,
        _checkLegacyRootMandate,
        _checkSysadminRole,
    ):
        check(db)
    _backfillTargetFeatureInstanceId()
def _safe(checkName: str, fn) -> Any:
try:
return fn()
except Exception as exc:
logger.warning(
"Legacy-data telemetry check '%s' failed: %s: %s",
checkName, type(exc).__name__, exc,
)
return None
def _checkMandateDescription(db: DatabaseConnector) -> None:
    """WARN when Mandate rows still carry a description but no label.

    Such rows were normalized by the removed
    ``_migrateMandateDescriptionToLabel`` migration.
    """
    def _do() -> None:
        offenders = []
        for row in db.getRecordset(Mandate):
            if row.get("description") and not row.get("label"):
                offenders.append(row.get("id"))
        if not offenders:
            return
        logger.warning(
            "Legacy-data check: %d Mandate row(s) still have description "
            "but empty label (removed migration: _migrateMandateDescriptionToLabel). "
            "Run scripts/script_db_audit_legacy_state.py for details. IDs: %s",
            len(offenders), offenders[:5],
        )
    _safe("mandate-description", _do)
def _checkMandateSlugRules(db: DatabaseConnector) -> None:
    """WARN when Mandate rows violate the name-slug / non-empty-label rules.

    Mirrors the removed ``_migrateMandateNameLabelSlugRules`` migration:
    rows are visited in stable id order, the first holder of a slug "owns"
    it, and later duplicates count as collisions.
    """
    def _do() -> None:
        records = db.getRecordset(Mandate)
        ownedSlugs: set[str] = set()
        offenders: list[str] = []
        for rec in sorted(records, key=lambda row: str(row.get("id", ""))):
            recId = rec.get("id")
            if not recId:
                continue
            slug = (rec.get("name") or "").strip()
            # Label counts as empty when missing, None, or whitespace-only.
            hasLabel = bool((rec.get("label") or "").strip())
            slugOk = isValidMandateName(slug)
            duplicate = slug in ownedSlugs
            if slugOk and not duplicate:
                ownedSlugs.add(slug)
            if not hasLabel or not slugOk or duplicate:
                offenders.append(str(recId))
        if offenders:
            logger.warning(
                "Legacy-data check: %d Mandate row(s) violate slug/label rules "
                "(removed migration: _migrateMandateNameLabelSlugRules). "
                "Run scripts/script_db_audit_legacy_state.py for details. IDs: %s",
                len(offenders), offenders[:5],
            )
    _safe("mandate-slug-rules", _do)
def _checkLegacyRootMandate(db: DatabaseConnector) -> None:
    """WARN when a Root mandate still exists in its pre-migration shape.

    Legacy forms: name='Root' (old capitalization), or name='root' without
    the isSystem flag (handled by the removed initRootMandate legacy branch).
    """
    def _do() -> None:
        offenders = list(db.getRecordset(Mandate, recordFilter={"name": "Root"}))
        for row in db.getRecordset(Mandate, recordFilter={"name": "root"}):
            if not row.get("isSystem"):
                offenders.append(row)
        if offenders:
            logger.warning(
                "Legacy-data check: %d Root-Mandate row(s) still in legacy form "
                "(removed migration: initRootMandate-legacy-branch). IDs: %s",
                len(offenders), [r.get("id") for r in offenders][:5],
            )
    _safe("root-mandate-legacy", _do)
def _checkSysadminRole(db: DatabaseConnector) -> None:
    """WARN when the legacy 'sysadmin' role is still present in the root
    mandate (handled by the removed ``_migrateAndDropSysAdminRole``)."""
    def _do() -> None:
        rootMandates = db.getRecordset(
            Mandate, recordFilter={"name": "root", "isSystem": True}
        )
        if not rootMandates:
            # No system root mandate: nothing the legacy role could live in.
            return
        roleFilter = {
            "roleLabel": "sysadmin",
            "mandateId": str(rootMandates[0].get("id")),
            "featureInstanceId": None,
        }
        legacyRoles = db.getRecordset(Role, recordFilter=roleFilter)
        if not legacyRoles:
            return
        logger.warning(
            "Legacy-data check: %d 'sysadmin' role(s) still present in root mandate "
            "(removed migration: _migrateAndDropSysAdminRole). "
            "Authority is now User.isPlatformAdmin -- migrate manually. IDs: %s",
            len(legacyRoles), [r.get("id") for r in legacyRoles],
        )
    _safe("sysadmin-role", _do)
def _backfillTargetFeatureInstanceId() -> None:
    """Idempotent backfill: set targetFeatureInstanceId = featureInstanceId
    for all non-template AutoWorkflow rows where it is still NULL.

    Connects to ``poweron_graphicaleditor`` independently of the bootstrap
    connector (which points at a different database).

    NOTE(review): unlike the other checks in this module, this routine
    WRITES (one recordModify per backfilled row) -- it is an exception to
    the module's read-only design principle.
    """
    def _do() -> None:
        # Deferred imports: failures (e.g. feature package absent) are
        # swallowed by _safe instead of breaking module import or boot.
        from modules.shared.configuration import APP_CONFIG
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
        dbHost = APP_CONFIG.get("DB_HOST", "localhost")
        dbUser = APP_CONFIG.get("DB_USER")
        # Secret-store password takes precedence over the plain config value.
        dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
        dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
        geDb = DatabaseConnector(
            dbHost=dbHost,
            dbDatabase="poweron_graphicaleditor",
            dbUser=dbUser,
            dbPassword=dbPassword,
            dbPort=dbPort,
            userId=None,
        )
        # Table missing (fresh install): nothing to backfill.
        if not geDb._ensureTableExists(AutoWorkflow):
            return
        rows = geDb.getRecordset(AutoWorkflow) or []
        backfilled = 0
        for r in rows:
            if r.get("isTemplate"):
                continue  # templates legitimately have no target
            if r.get("targetFeatureInstanceId"):
                continue  # already set -- keeps the backfill idempotent
            srcId = r.get("featureInstanceId")
            if not srcId:
                continue  # no source value to copy from
            geDb.recordModify(AutoWorkflow, r["id"], {"targetFeatureInstanceId": srcId})
            backfilled += 1
        if backfilled:
            logger.info(
                "targetFeatureInstanceId backfill: set %d non-template AutoWorkflow row(s) "
                "to their featureInstanceId",
                backfilled,
            )
    _safe("backfill-targetFeatureInstanceId", _do)

View file

@ -56,14 +56,8 @@ def initBootstrap(db: DatabaseConnector) -> None:
logger.info("Starting system bootstrap")
# Initialize root mandate
mandateId = initRootMandate(db)
# Migrate existing mandate records: description -> label
_migrateMandateDescriptionToLabel(db)
_migrateMandateNameLabelSlugRules(db)
# Clean up duplicate roles and fix corrupted templates FIRST
_deduplicateRoles(db)
# Initialize system role TEMPLATES (mandateId=None, isSystemRole=True)
@ -76,14 +70,6 @@ def initBootstrap(db: DatabaseConnector) -> None:
# This also serves as migration for existing mandates that don't have instance roles yet
_ensureAllMandatesHaveSystemRoles(db)
# Migration: eliminate the legacy ``sysadmin`` role in root mandate
# (replaced by ``User.isPlatformAdmin`` flag — see
# wiki/c-work/4-done/2026-04-sysadmin-authority-split.md).
# Idempotent: noop after first successful run.
if mandateId:
_migrateAndDropSysAdminRole(db, mandateId)
# Ensure UI rules for navigation items (admin/user/viewer roles)
_ensureUiContextRules(db)
# Initialize admin user
@ -132,6 +118,15 @@ def initBootstrap(db: DatabaseConnector) -> None:
# Ensure billing settings and accounts exist for all mandates
_bootstrapBilling()
# Telemetrie: warne falls Restbestaende der entfernten idempotenten
# Migrationen wieder auftauchen (Edge-Case: alter DB-Restore o.ae.).
# Schreibt nicht, scheitert nicht den Boot.
try:
from modules.interfaces._legacyMigrationTelemetry import runLegacyDataChecks
runLegacyDataChecks(db)
except Exception as e:
logger.warning(f"Legacy-data telemetry skipped: {e}")
def _bootstrapBilling() -> None:
"""
@ -396,21 +391,12 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]:
Returns:
Mandate ID if created or found, None otherwise
"""
# Find existing root mandate by name AND isSystem flag
existingMandates = db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True})
if existingMandates:
mandateId = existingMandates[0].get("id")
logger.info(f"Root mandate already exists with ID {mandateId}")
return mandateId
# Check for legacy root mandates (name="Root" without isSystem flag) and migrate
legacyMandates = db.getRecordset(Mandate, recordFilter={"name": "Root"})
if legacyMandates:
mandateId = legacyMandates[0].get("id")
logger.info(f"Migrating legacy Root mandate {mandateId}: setting name='root', isSystem=True")
db.recordModify(Mandate, mandateId, {"name": "root", "isSystem": True})
return mandateId
logger.info("Creating Root mandate")
rootMandate = Mandate(name="root", label="Root", isSystem=True, enabled=True)
createdMandate = db.recordCreate(Mandate, rootMandate)
@ -419,98 +405,6 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]:
return mandateId
def _migrateMandateDescriptionToLabel(db: DatabaseConnector) -> None:
"""
Migration: Rename 'description' field to 'label' in all Mandate records.
Copies existing 'description' values to 'label' and removes the old field.
Safe to run multiple times (idempotent).
"""
allMandates = db.getRecordset(Mandate)
migratedCount = 0
for mandateRecord in allMandates:
mandateId = mandateRecord.get("id")
hasDescription = "description" in mandateRecord and mandateRecord.get("description") is not None
hasLabel = "label" in mandateRecord and mandateRecord.get("label") is not None
if hasDescription and not hasLabel:
# Copy description to label
updateData = {"label": mandateRecord["description"]}
db.recordModify(Mandate, mandateId, updateData)
migratedCount += 1
logger.info(f"Migrated mandate {mandateId}: description -> label")
if migratedCount > 0:
logger.info(f"Migrated {migratedCount} mandate(s) from description to label")
else:
logger.debug("No mandate description->label migration needed")
def _migrateMandateNameLabelSlugRules(db: DatabaseConnector) -> None:
"""
Migration: normalize Mandate.name to the slug rules ([a-z0-9-], length 2..32, single
hyphen segments) and ensure Mandate.label is non-empty.
Rules (see wiki/c-work/1-plan/2026-04-mandate-name-label-logic.md):
1. If ``label`` is empty/None set ``label := name`` (or "Mandate" when both empty).
2. If ``name`` is not a valid slug, or collides with an earlier mandate in stable id
order, allocate a unique slug from the (now non-empty) ``label`` using
``slugifyMandateName`` + ``allocateUniqueMandateSlug``.
Idempotent: a second run is a no-op because all valid names stay valid and stay unique.
Each rename and label fill-in is logged for audit.
"""
from modules.shared.mandateNameUtils import (
allocateUniqueMandateSlug,
isValidMandateName,
slugifyMandateName,
)
allRows = db.getRecordset(Mandate)
if not allRows:
return
sortedRows = sorted(allRows, key=lambda r: str(r.get("id", "")))
used: set[str] = set()
labelFills = 0
nameRenames: list[tuple[str, str, str]] = []
for rec in sortedRows:
mid = rec.get("id")
if not mid:
continue
name = (rec.get("name") or "").strip()
labelRaw = rec.get("label")
label = (labelRaw or "").strip() if labelRaw is not None else ""
if not label:
label = name if name else "Mandate"
db.recordModify(Mandate, mid, {"label": label})
labelFills += 1
logger.info(f"Mandate {mid}: filled empty label with '{label}'")
nameFits = isValidMandateName(name)
nameCollides = name in used
if nameFits and not nameCollides:
used.add(name)
continue
base = slugifyMandateName(label) or "mn"
newName = allocateUniqueMandateSlug(base, used)
used.add(newName)
if newName != name:
db.recordModify(Mandate, mid, {"name": newName})
nameRenames.append((str(mid), name, newName))
logger.info(f"Mandate {mid}: renamed name '{name}' -> '{newName}'")
if labelFills or nameRenames:
logger.info(
"Mandate name/label slug migration: %d label fill-in(s), %d name rename(s)",
labelFills, len(nameRenames),
)
else:
logger.debug("No mandate name/label slug migration needed")
def initAdminUser(db: DatabaseConnector, mandateId: Optional[str]) -> Optional[str]:
"""
Creates the Admin user if it doesn't exist.
@ -837,101 +731,6 @@ def copySystemRolesToMandate(db: DatabaseConnector, mandateId: str) -> int:
return copiedCount
def _migrateAndDropSysAdminRole(db: DatabaseConnector, mandateId: str) -> None:
"""
One-shot migration: eliminate the legacy ``sysadmin`` role in the root mandate.
Authority semantics moved to two orthogonal flags on User:
- ``isSysAdmin`` Infrastructure-Operator (RBAC bypass)
- ``isPlatformAdmin`` Cross-Mandate-Governance (no bypass)
Migration steps (idempotent):
1. Find sysadmin role(s) in root mandate. If none exist done.
2. For every UserMandateRole row referencing such a role: set
``user.isPlatformAdmin = True`` (preserves cross-mandate authority).
3. Delete those UserMandateRole rows.
4. Delete AccessRules attached to the sysadmin role.
5. Delete the sysadmin Role record.
Args:
db: Database connector instance
mandateId: Root mandate ID
"""
sysadminRoles = db.getRecordset(
Role,
recordFilter={"roleLabel": "sysadmin", "mandateId": mandateId, "featureInstanceId": None},
)
if not sysadminRoles:
logger.debug("Sysadmin role migration: no legacy sysadmin role present, nothing to do")
return
sysadminRoleIds = [str(r.get("id")) for r in sysadminRoles if r.get("id")]
logger.warning(
f"Sysadmin role migration: found {len(sysadminRoleIds)} legacy sysadmin role(s) "
f"in root mandate, migrating to isPlatformAdmin flag"
)
# 1) Promote every holder to isPlatformAdmin=True
promoted = 0
for sysadminRoleId in sysadminRoleIds:
umRoleRows = db.getRecordset(
UserMandateRole, recordFilter={"roleId": sysadminRoleId}
)
userMandateIds = [str(r.get("userMandateId")) for r in umRoleRows if r.get("userMandateId")]
if not userMandateIds:
continue
# Resolve userIds via UserMandate
userIds = set()
for umId in userMandateIds:
ums = db.getRecordset(UserMandate, recordFilter={"id": umId})
for um in ums:
uid = um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)
if uid:
userIds.add(str(uid))
for userId in userIds:
users = db.getRecordset(UserInDB, recordFilter={"id": userId})
if not users:
continue
current = users[0].get("isPlatformAdmin", False)
if not current:
db.recordModify(UserInDB, userId, {"isPlatformAdmin": True})
promoted += 1
logger.warning(
f"Sysadmin role migration: granted isPlatformAdmin=True to user {userId}"
)
# 2) Delete UserMandateRole rows
for umRow in umRoleRows:
rowId = umRow.get("id") if isinstance(umRow, dict) else getattr(umRow, "id", None)
if rowId:
try:
db.recordDelete(UserMandateRole, str(rowId))
except Exception as e:
logger.error(f"Sysadmin role migration: failed to drop UserMandateRole {rowId}: {e}")
# 3) Delete AccessRules
accessRules = db.getRecordset(AccessRule, recordFilter={"roleId": sysadminRoleId})
for ar in accessRules:
arId = ar.get("id") if isinstance(ar, dict) else getattr(ar, "id", None)
if arId:
try:
db.recordDelete(AccessRule, str(arId))
except Exception as e:
logger.error(f"Sysadmin role migration: failed to drop AccessRule {arId}: {e}")
# 4) Delete the Role
try:
db.recordDelete(Role, sysadminRoleId)
except Exception as e:
logger.error(f"Sysadmin role migration: failed to drop Role {sysadminRoleId}: {e}")
logger.warning(
f"Sysadmin role migration: completed; promoted {promoted} user(s) to isPlatformAdmin"
)
def _getRoleId(db: DatabaseConnector, roleLabel: str) -> Optional[str]:
"""
Get role ID by label, using cache or database lookup.

View file

@ -603,41 +603,10 @@ def aggregateMandateRagTotalBytes(mandateId: str) -> int:
if rid and str(rid) not in byId:
byId[str(rid)] = row
# DEPRECATED: file-ID-correlation fallback from poweron_management.
# Only needed for pre-migration data where mandateId/featureInstanceId on the
# FileContentIndex are empty. Safe to remove once all environments are migrated.
_fallbackCount = 0
try:
from modules.datamodels.datamodelFiles import FileItem
from modules.interfaces.interfaceDbManagement import ComponentObjects
mgmtDb = ComponentObjects().db
knowledgeIf = getInterface(None)
fileIds: set = set()
for f in mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId}):
fid = f.get("id") if isinstance(f, dict) else getattr(f, "id", None)
if fid:
fileIds.add(str(fid))
for instId in instIds:
for f in mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}):
fid = f.get("id") if isinstance(f, dict) else getattr(f, "id", None)
if fid:
fileIds.add(str(fid))
for fid in fileIds:
if fid in byId:
continue
row = knowledgeIf.getFileContentIndex(fid)
if row:
byId[fid] = row
_fallbackCount += 1
except Exception as e:
logger.warning("aggregateMandateRagTotalBytes fallback failed: %s", e)
total = sum(int(r.get("totalSize") or 0) for r in byId.values())
logger.info(
"aggregateMandateRagTotalBytes(%s): %d indexes, %d bytes (fallback: %d)",
mandateId, len(byId), total, _fallbackCount,
"aggregateMandateRagTotalBytes(%s): %d indexes, %d bytes",
mandateId, len(byId), total,
)
return total

View file

@ -347,6 +347,7 @@ class FeatureInterface:
"templateSourceId": templateId,
"templateScope": "instance",
"active": True,
"targetFeatureInstanceId": instanceId,
})
copied += 1
except Exception as e:

View file

@ -0,0 +1,246 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
User-facing Automation Workspace API.
Lists workflow runs the user can access (via FeatureAccess on
targetFeatureInstanceId) and provides detail views with step logs
and linked files. Designed for the "Workspace" tab under
Nutzung > Automation.
"""
import logging
import math
from typing import Optional
from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException
from slowapi import Limiter
from slowapi.util import get_remote_address
from modules.auth.authentication import getRequestContext, RequestContext
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.configuration import APP_CONFIG
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
AutoRun,
AutoStepLog,
AutoWorkflow,
)
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import graphicalEditorDatabase
from modules.shared.i18nRegistry import apiRouteContext
routeApiMsg = apiRouteContext("routeAutomationWorkspace")
logger = logging.getLogger(__name__)
limiter = Limiter(key_func=get_remote_address)
router = APIRouter(prefix="/api/automations/runs", tags=["AutomationWorkspace"])
def _getDb() -> DatabaseConnector:
    """Open a connector to the graphical-editor feature database, reading
    connection settings from the application configuration."""
    password = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
    connectionArgs = {
        "dbHost": APP_CONFIG.get("DB_HOST", "localhost"),
        "dbDatabase": graphicalEditorDatabase,
        "dbUser": APP_CONFIG.get("DB_USER"),
        "dbPassword": password,
        "dbPort": int(APP_CONFIG.get("DB_PORT", 5432)),
        "userId": None,
    }
    return DatabaseConnector(**connectionArgs)
def _getUserAccessibleInstanceIds(userId: str) -> list[str]:
    """Return all featureInstanceIds the user has enabled FeatureAccess for."""
    from modules.interfaces.interfaceDbApp import getRootInterface
    accessRows = getRootInterface().getFeatureAccessesForUser(userId) or []
    instanceIds: list[str] = []
    for access in accessRows:
        if access.featureInstanceId and access.enabled:
            instanceIds.append(access.featureInstanceId)
    return instanceIds
@router.get("")
@limiter.limit("60/minute")
def listWorkspaceRuns(
request: Request,
scope: str = Query("mine", description="mine = own runs, mandate = all accessible"),
status: Optional[str] = Query(None, description="Filter by run status"),
targetInstanceId: Optional[str] = Query(None, description="Filter by targetFeatureInstanceId"),
workflowId: Optional[str] = Query(None, description="Filter by workflow"),
limit: int = Query(50, ge=1, le=200),
offset: int = Query(0, ge=0),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""List workflow runs visible to the user.
scope=mine: only runs owned by the user.
scope=mandate: all runs where the user has FeatureAccess on the
workflow's targetFeatureInstanceId.
"""
db = _getDb()
if not db._ensureTableExists(AutoRun):
return {"runs": [], "total": 0, "limit": limit, "offset": offset}
userId = str(context.user.id) if context.user else None
if not userId:
raise HTTPException(status_code=401, detail=routeApiMsg("Authentication required"))
accessibleInstanceIds = _getUserAccessibleInstanceIds(userId)
if not accessibleInstanceIds:
return {"runs": [], "total": 0, "limit": limit, "offset": offset}
if not db._ensureTableExists(AutoWorkflow):
return {"runs": [], "total": 0, "limit": limit, "offset": offset}
wfFilter: dict = {}
if targetInstanceId:
if targetInstanceId not in accessibleInstanceIds:
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied to target instance"))
wfFilter["targetFeatureInstanceId"] = targetInstanceId
workflows = db.getRecordset(AutoWorkflow, recordFilter=wfFilter or None) or []
visibleWfIds: set[str] = set()
wfMap: dict = {}
for wf in workflows:
wfDict = dict(wf)
tid = wfDict.get("targetFeatureInstanceId") or wfDict.get("featureInstanceId")
if tid and tid in accessibleInstanceIds:
wfId = wfDict.get("id")
if wfId:
visibleWfIds.add(wfId)
wfMap[wfId] = wfDict
if workflowId:
if workflowId not in visibleWfIds:
return {"runs": [], "total": 0, "limit": limit, "offset": offset}
visibleWfIds = {workflowId}
if not visibleWfIds:
return {"runs": [], "total": 0, "limit": limit, "offset": offset}
allRuns = db.getRecordset(AutoRun, recordFilter={}) or []
filtered = []
for r in allRuns:
row = dict(r)
if row.get("workflowId") not in visibleWfIds:
continue
if scope == "mine" and row.get("ownerId") != userId:
continue
if status and row.get("status") != status:
continue
filtered.append(row)
filtered.sort(
key=lambda x: x.get("startedAt") or x.get("sysCreatedAt") or 0,
reverse=True,
)
total = len(filtered)
page = filtered[offset: offset + limit]
from modules.routes.routeHelpers import enrichRowsWithFkLabels, resolveMandateLabels, resolveInstanceLabels
for row in page:
wf = wfMap.get(row.get("workflowId"), {})
row["workflowLabel"] = row.get("label") or wf.get("label") or row.get("workflowId", "")
row["targetFeatureInstanceId"] = wf.get("targetFeatureInstanceId") or wf.get("featureInstanceId")
enrichRowsWithFkLabels(
page,
labelResolvers={
"mandateId": resolveMandateLabels,
"targetFeatureInstanceId": resolveInstanceLabels,
},
)
for row in page:
row["targetInstanceLabel"] = row.pop("targetFeatureInstanceIdLabel", None)
row["mandateLabel"] = row.pop("mandateIdLabel", None)
return {"runs": page, "total": total, "limit": limit, "offset": offset}
@router.get("/{runId}/detail")
@limiter.limit("60/minute")
def getWorkspaceRunDetail(
request: Request,
runId: str = Path(..., description="Run ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Get full detail for a single run: metadata, step logs, linked files."""
db = _getDb()
userId = str(context.user.id) if context.user else None
if not userId:
raise HTTPException(status_code=401, detail=routeApiMsg("Authentication required"))
if not db._ensureTableExists(AutoRun):
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
if not runs:
raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
run = dict(runs[0])
wfId = run.get("workflowId")
workflow: dict = {}
if wfId and db._ensureTableExists(AutoWorkflow):
wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfId})
if wfs:
workflow = dict(wfs[0])
tid = workflow.get("targetFeatureInstanceId") or workflow.get("featureInstanceId")
accessibleIds = _getUserAccessibleInstanceIds(userId)
isOwner = run.get("ownerId") == userId
if not isOwner and (not tid or tid not in accessibleIds) and not context.isPlatformAdmin:
raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
steps: list = []
if db._ensureTableExists(AutoStepLog):
stepRecords = db.getRecordset(AutoStepLog, recordFilter={"runId": runId}) or []
steps = [dict(s) for s in stepRecords]
steps.sort(key=lambda s: s.get("startedAt") or 0)
fileItems: list = []
try:
from modules.datamodels.datamodelFiles import FileItem
from modules.interfaces.interfaceDbManagement import ComponentObjects
mgmtDb = ComponentObjects().db
if mgmtDb._ensureTableExists(FileItem):
nodeOutputs = run.get("nodeOutputs") or {}
fileIds: set[str] = set()
for nodeId, output in nodeOutputs.items():
if not isinstance(output, dict):
continue
for key in ("fileId", "documentId", "fileIds", "documents"):
val = output.get(key)
if isinstance(val, str) and val:
fileIds.add(val)
elif isinstance(val, list):
for v in val:
if isinstance(v, str) and v:
fileIds.add(v)
elif isinstance(v, dict) and v.get("id"):
fileIds.add(v["id"])
for fid in fileIds:
try:
rec = mgmtDb.getRecord(FileItem, fid)
if rec:
fileItems.append(dict(rec))
except Exception:
pass
except Exception as e:
logger.warning("getWorkspaceRunDetail: file lookup failed: %s", e)
run["workflowLabel"] = run.get("label") or workflow.get("label") or wfId
run["targetFeatureInstanceId"] = tid
return {
"run": run,
"workflow": {
"id": workflow.get("id"),
"label": workflow.get("label"),
"targetFeatureInstanceId": tid,
"featureInstanceId": workflow.get("featureInstanceId"),
"tags": workflow.get("tags", []),
} if workflow else None,
"steps": steps,
"files": fileItems,
}

View file

@ -302,6 +302,30 @@ async def _executeWithRetry(executor, node, context, maxRetries: int = 0, retryD
raise lastError
def _substituteFeatureInstancePlaceholders(
graph: Dict[str, Any],
targetFeatureInstanceId: str,
) -> Dict[str, Any]:
"""Replace ``{{featureInstanceId}}`` placeholders in the serialised graph.
Works on the full JSON representation so that placeholders inside nested
parameter dicts, prompt strings, etc. are all caught. Already-resolved
concrete UUIDs (pre-baked by ``_copyTemplateWorkflows``) are left untouched
because the placeholder literal ``{{featureInstanceId}}`` will not match.
"""
import json as _json
raw = _json.dumps(graph)
if "{{featureInstanceId}}" not in raw:
return graph
replaced = raw.replace("{{featureInstanceId}}", targetFeatureInstanceId)
logger.debug(
"_substituteFeatureInstancePlaceholders: resolved %d occurrence(s) -> %s",
raw.count("{{featureInstanceId}}"),
targetFeatureInstanceId,
)
return _json.loads(replaced)
async def executeGraph(
graph: Dict[str, Any],
services: Any,
@ -315,6 +339,7 @@ async def executeGraph(
runId: Optional[str] = None,
run_envelope: Optional[Dict[str, Any]] = None,
label: Optional[str] = None,
targetFeatureInstanceId: Optional[str] = None,
) -> Dict[str, Any]:
"""
Execute automation2 graph. Returns { success, nodeOutputs, error?, stopped? }.
@ -322,14 +347,16 @@ async def executeGraph(
pauses the run, and returns { success: False, paused: True, taskId, runId }.
For resume: pass initialNodeOutputs (with result for the human node) and startAfterNodeId.
For fresh runs: pass run_envelope (unified start payload for the start node); normalized with userId into context.runEnvelope.
targetFeatureInstanceId: resolves {{featureInstanceId}} placeholders in the graph JSON before execution.
"""
logger.info(
"executeGraph start: instanceId=%s workflowId=%s userId=%s mandateId=%s resume=%s",
"executeGraph start: instanceId=%s workflowId=%s userId=%s mandateId=%s resume=%s targetInstance=%s",
instanceId,
workflowId,
userId,
mandateId,
startAfterNodeId is not None,
targetFeatureInstanceId,
)
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
@ -338,6 +365,9 @@ async def executeGraph(
materializeFeatureInstanceRefs,
)
if targetFeatureInstanceId:
graph = _substituteFeatureInstancePlaceholders(graph, targetFeatureInstanceId)
# Phase-5 Schicht-4: typed-ref envelopes are materialized FIRST so the
# subsequent connection-ref pass and validation see the canonical shape.
graph = materializeFeatureInstanceRefs(graph)

View file

@ -243,6 +243,7 @@ class WorkflowScheduler:
runEnv = normalize_run_envelope(runEnv, user_id=str(eventUser.id) if eventUser else None)
_wfLabel = wf.get("label") if isinstance(wf, dict) else getattr(wf, "label", None)
_targetInstanceId = wf.get("targetFeatureInstanceId") if isinstance(wf, dict) else getattr(wf, "targetFeatureInstanceId", None)
result = await executeGraph(
graph=wf["graph"],
@ -254,6 +255,7 @@ class WorkflowScheduler:
automation2_interface=iface,
run_envelope=runEnv,
label=_wfLabel,
targetFeatureInstanceId=_targetInstanceId,
)
logger.info(
"WorkflowScheduler: executed workflow %s success=%s paused=%s",

View file

@ -0,0 +1,19 @@
# Archived one-shot scripts
Diese Scripts haben einmal eine konkrete Daten- oder Code-Migration ausgefuehrt
und werden nicht mehr aktiv aufgerufen. Sie bleiben hier liegen, falls jemand
spaeter auf einem alten DB-Dump oder einem alten Branch nochmal denselben Stand
herstellen muss.
KEIN aktives Tool. Nicht aus CI, nicht aus Docs verlinken. Bei Aufraeumarbeiten
(z.B. nach 6 Monaten ohne Anwendung) loeschen.
## Inhalt
| Datei | Migrationsthema | Archiviert am | Begruendung |
|-------|-----------------|---------------|-------------|
| `check_orphan_featureinstance.py` | Vor-Ort-Check mit hardcoded FeatureInstance-/Mandate-UUIDs | 2026-04-29 | Ad-hoc fuer einen konkreten Vorfall |
| `script_db_cleanup_duplicate_roles.py` | Cleanup doppelter Roles wegen `IS NULL`-Bug in `connectorDbPostgre` | 2026-04-29 | Bug ist laengst gefixt, Cleanup ueberall durchgelaufen |
| `migrate_async_to_sync.py` | One-shot Codemod `async def` -> `def` fuer FastAPI-Routes | 2026-04-29 | Refactor abgeschlossen |
| `i18n_rekey_plaintext_keys.py` | Frontend `t('dot.notation')` -> `t('Klartext')` Rekey | 2026-04-29 | Frontend-Migration abgeschlossen (siehe `wiki/c-work/4-done/2026-04-ui-i18n-dynamic-language-sets.md`) |
| `script_db_migrate_accessrules_objectkeys.py` | AccessRule-Items: kurz -> vollqualifiziert (Navigation-API) | 2026-04-29 | Navigation-API live, MIGRATION_MAP nur fuer trustee+realestate hardcoded |

View file

@ -1,25 +0,0 @@
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
import psycopg2, psycopg2.extras
from modules.shared.configuration import APP_CONFIG
c = psycopg2.connect(
host=APP_CONFIG.get('DB_HOST','localhost'),
user=APP_CONFIG.get('DB_USER'),
password=APP_CONFIG.get('DB_PASSWORD_SECRET'),
port=int(APP_CONFIG.get('DB_PORT',5432)),
dbname='poweron_app',
)
cur = c.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute('SELECT id, name, label, enabled, "deletedAt", "sysCreatedAt" FROM "Mandate" ORDER BY "sysCreatedAt"')
print("All Mandates in poweron_app:")
for r in cur.fetchall():
print(f" id={r['id']} name={r['name']} label={r['label']} enabled={r['enabled']} deletedAt={r['deletedAt']}")
cur.execute('SELECT COUNT(*) AS n FROM "FeatureInstance" WHERE "featureCode" = %s', ("redmine",))
print(f"\nTotal redmine FeatureInstances in poweron_app: {cur.fetchone()['n']}")
cur.execute('SELECT id, "mandateId", label, enabled FROM "FeatureInstance" WHERE "featureCode" = %s ORDER BY "sysCreatedAt"', ("redmine",))
for r in cur.fetchall():
print(f" fi={r['id']} mandate={r['mandateId']} label={r['label']} enabled={r['enabled']}")

View file

@ -0,0 +1,382 @@
#!/usr/bin/env python3
"""Audit-Skript fuer Legacy-Bestaende vor Bootstrap-Cleanup (Plan C).
Prueft fuer jede der 5 Bootstrap-Migrationsroutinen, ob noch Restbestand
existiert. Wenn alle Checks 0 / GREEN liefern, kann die jeweilige Routine
sicher aus ``interfaceBootstrap.py`` / ``interfaceDbKnowledge.py`` entfernt
werden.
Checks:
1. Mandate.description != NULL und Mandate.label leer
-> _migrateMandateDescriptionToLabel
2. Mandate.label leer ODER Mandate.name verstoesst gegen Slug-Regeln
-> _migrateMandateNameLabelSlugRules
3. Mandate mit name='Root' und isSystem=False
-> initRootMandate Legacy-Zweig
4. Role mit roleLabel='sysadmin' im Root-Mandat
-> _migrateAndDropSysAdminRole
5. FileContentIndex mit leerem mandateId UND leerem featureInstanceId
-> aggregateMandateRagTotalBytes Fallback-Block
Verwendung:
python -m scripts.script_db_audit_legacy_state # text-output
python -m scripts.script_db_audit_legacy_state --json # JSON-output
python -m scripts.script_db_audit_legacy_state --purge-rag-orphans
# loescht FileContentIndex-Rows ohne mandateId UND ohne featureInstanceId
# (Voraussetzung fuer Removal des aggregateMandateRagTotalBytes-Fallback)
Exit-Code:
0 alle Checks GREEN (Removal sicher)
1 mind. ein Check RED (erst Daten bereinigen)
2 Skript-Fehler (DB nicht erreichbar etc.)
Lese-Zugriffe sind die Default. Schreibzugriffe NUR mit explizitem
``--purge-*``-Flag.
"""
from __future__ import annotations
import argparse
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional
_gatewayDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if _gatewayDir not in sys.path:
sys.path.insert(0, _gatewayDir)
from dotenv import load_dotenv
_envPath = os.path.join(_gatewayDir, "env_dev.env")
if os.path.exists(_envPath):
load_dotenv(_envPath)
from modules.datamodels.datamodelUam import Mandate
from modules.datamodels.datamodelRbac import Role
from modules.datamodels.datamodelKnowledge import FileContentIndex
from modules.security.rootAccess import getRootDbAppConnector
from modules.interfaces.interfaceDbKnowledge import KnowledgeObjects
from modules.shared.mandateNameUtils import isValidMandateName
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger(__name__)
@dataclass
class _CheckResult:
"""Ergebnis eines einzelnen Audit-Checks."""
name: str
routine: str
location: str
count: int
status: str
samples: List[Dict[str, Any]] = field(default_factory=list)
error: Optional[str] = None
def toDict(self) -> Dict[str, Any]:
return {
"name": self.name,
"routine": self.routine,
"location": self.location,
"count": self.count,
"status": self.status,
"samples": self.samples,
"error": self.error,
}
def _getAppDb():
    """Return a root-privileged connector to the ``poweron_app`` database."""
    return getRootDbAppConnector()
def _getKnowledgeDb():
    """Return the knowledge-DB connector used for ``FileContentIndex`` checks."""
    return KnowledgeObjects().db
def _checkMandateDescription(db) -> _CheckResult:
    """Flag mandates that still carry a ``description`` but have no ``label``.

    These rows would be handled by ``_migrateMandateDescriptionToLabel``;
    a GREEN result means that routine can be removed.
    """
    legacy = []
    for row in db.getRecordset(Mandate):
        description = row.get("description")
        # Only rows with a non-empty description AND an empty label count
        # as legacy; everything else was already migrated.
        if not description or row.get("label"):
            continue
        legacy.append(
            {
                "id": row.get("id"),
                "name": row.get("name"),
                "description": str(description)[:60],
                "label": row.get("label"),
            }
        )
    return _CheckResult(
        name="mandate-description-to-label",
        routine="_migrateMandateDescriptionToLabel",
        location="interfaces/interfaceBootstrap.py:422-445",
        count=len(legacy),
        status="RED" if legacy else "GREEN",
        samples=legacy[:5],
    )
def _checkMandateSlugRules(db) -> _CheckResult:
    """Flag mandates whose ``name`` breaks slug rules or whose ``label`` is empty.

    Mirrors the detection logic of ``_migrateMandateNameLabelSlugRules``:
    rows are visited in stable id order so that name collisions are
    attributed deterministically (first valid holder keeps the name).
    """
    legacy: list = []
    usedNames: set[str] = set()
    orderedRows = sorted(db.getRecordset(Mandate), key=lambda x: str(x.get("id", "")))
    for row in orderedRows:
        name = (row.get("name") or "").strip()
        labelRaw = row.get("label")
        if labelRaw is None:
            labelEmpty = True
        else:
            labelEmpty = not (labelRaw or "").strip()
        nameInvalid = not isValidMandateName(name)
        nameCollides = name in usedNames
        # Only a valid, non-colliding name reserves its slug for later rows.
        if not (nameInvalid or nameCollides):
            usedNames.add(name)
        if labelEmpty or nameInvalid or nameCollides:
            legacy.append(
                {
                    "id": row.get("id"),
                    "name": name,
                    "label": labelRaw,
                    "labelEmpty": labelEmpty,
                    "nameInvalid": nameInvalid,
                    "nameCollides": nameCollides,
                }
            )
    return _CheckResult(
        name="mandate-name-slug-rules",
        routine="_migrateMandateNameLabelSlugRules",
        location="interfaces/interfaceBootstrap.py:448-511",
        count=len(legacy),
        status="RED" if legacy else "GREEN",
        samples=legacy[:5],
    )
def _checkRootMandateLegacy(db) -> _CheckResult:
    """Detect pre-migration root mandates.

    Two legacy shapes are flagged: a mandate still named ``'Root'``
    (case-sensitive) and a ``'root'`` mandate missing ``isSystem=True``.
    """
    combined = list(db.getRecordset(Mandate, recordFilter={"name": "Root"}))
    for row in db.getRecordset(Mandate, recordFilter={"name": "root"}):
        if not row.get("isSystem"):
            combined.append(row)
    samples = [
        {
            "id": row.get("id"),
            "name": row.get("name"),
            "isSystem": row.get("isSystem"),
        }
        for row in combined
    ]
    return _CheckResult(
        name="root-mandate-legacy",
        routine="initRootMandate-legacy-branch",
        location="interfaces/interfaceBootstrap.py:406-412",
        count=len(samples),
        status="RED" if samples else "GREEN",
        samples=samples[:5],
    )
def _checkSysadminRole(db) -> _CheckResult:
    """Look for the legacy ``sysadmin`` role inside the root mandate.

    Without a root mandate the check is vacuously GREEN (the legacy role
    can only exist scoped to root).
    """
    checkMeta = {
        "name": "sysadmin-role",
        "routine": "_migrateAndDropSysAdminRole",
        "location": "interfaces/interfaceBootstrap.py:840-932",
    }
    rootMandates = db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True})
    if not rootMandates:
        return _CheckResult(
            count=0,
            status="GREEN",
            samples=[],
            error="kein Root-Mandat gefunden -- Check uebersprungen (kann nicht relevant sein)",
            **checkMeta,
        )
    rootId = str(rootMandates[0].get("id"))
    roleRows = db.getRecordset(
        Role,
        recordFilter={"roleLabel": "sysadmin", "mandateId": rootId, "featureInstanceId": None},
    )
    samples = [{"id": row.get("id"), "roleLabel": row.get("roleLabel")} for row in roleRows]
    return _CheckResult(
        count=len(samples),
        status="RED" if samples else "GREEN",
        samples=samples[:5],
        **checkMeta,
    )
def _checkRagFallback(knowDb) -> _CheckResult:
    """Find ``FileContentIndex`` rows with neither mandateId nor featureInstanceId.

    Such orphan rows are the only reason the ``aggregateMandateRagTotalBytes``
    fallback block still exists; zero hits means it can be removed.
    """
    legacy = []
    for row in knowDb.getRecordset(FileContentIndex):
        mandateRef = (row.get("mandateId") or "").strip()
        instanceRef = (row.get("featureInstanceId") or "").strip()
        if mandateRef or instanceRef:
            continue
        legacy.append(
            {
                "id": row.get("id"),
                "fileName": row.get("fileName"),
                "totalSize": row.get("totalSize"),
            }
        )
    return _CheckResult(
        name="rag-fallback-orphan-index",
        routine="aggregateMandateRagTotalBytes-fallback",
        location="interfaces/interfaceDbKnowledge.py:609-635",
        count=len(legacy),
        status="RED" if legacy else "GREEN",
        samples=legacy[:5],
    )
def _runChecks() -> List[_CheckResult]:
    """Run every audit check; per-check failures become ERROR results.

    Checks never abort the whole audit: an exception is captured into an
    ERROR-status ``_CheckResult`` so the summary stays complete.
    """
    appDb = _getAppDb()
    knowDb = _getKnowledgeDb()
    results: List[_CheckResult] = []
    appChecks = (
        _checkMandateDescription,
        _checkMandateSlugRules,
        _checkRootMandateLegacy,
        _checkSysadminRole,
    )
    for checkFn in appChecks:
        try:
            results.append(checkFn(appDb))
        except Exception as exc:
            # routine/location are unknown here because the check crashed
            # before building its result.
            results.append(
                _CheckResult(
                    name=checkFn.__name__,
                    routine="?",
                    location="?",
                    count=-1,
                    status="ERROR",
                    error=f"{type(exc).__name__}: {exc}",
                )
            )
    try:
        results.append(_checkRagFallback(knowDb))
    except Exception as exc:
        results.append(
            _CheckResult(
                name="rag-fallback-orphan-index",
                routine="aggregateMandateRagTotalBytes-fallback",
                location="interfaces/interfaceDbKnowledge.py:609-635",
                count=-1,
                status="ERROR",
                error=f"{type(exc).__name__}: {exc}",
            )
        )
    return results
def _printText(results: List[_CheckResult]) -> None:
    """Render the audit results as a human-readable text report on stdout.

    Prints one section per check (status marker, routine, location, count,
    optional note and up to 5 sample rows), then a summary line and a
    verdict telling the operator whether the migration routines are safe
    to remove.
    """
    print("=" * 78)
    print("BOOTSTRAP-MIGRATIONS LEGACY-STATE-AUDIT")
    print("=" * 78)
    for r in results:
        # Visual status marker; unknown statuses fall back to "[?]".
        marker = {
            "GREEN": "[OK]",
            "RED": "[!!]",
            "ERROR": "[ERR]",
        }.get(r.status, "[?]")
        print(f"\n{marker} {r.name}")
        print(f" Routine : {r.routine}")
        print(f" Location: {r.location}")
        print(f" Count : {r.count}")
        print(f" Status : {r.status}")
        if r.error:
            print(f" Note : {r.error}")
        if r.samples:
            print(f" Samples : (max 5)")
            for s in r.samples:
                print(f" {s}")
    print("\n" + "=" * 78)
    greens = sum(1 for r in results if r.status == "GREEN")
    reds = sum(1 for r in results if r.status == "RED")
    errs = sum(1 for r in results if r.status == "ERROR")
    print(f"SUMMARY: {greens} GREEN {reds} RED {errs} ERROR ({len(results)} total)")
    # Verdict mirrors the exit-code logic in main(): ERROR beats RED beats GREEN.
    if reds == 0 and errs == 0:
        print("VERDICT: alle Migrationsroutinen koennen entfernt werden.")
    elif errs > 0:
        print("VERDICT: Audit unvollstaendig (Fehler) -- bitte Skript fixen.")
    else:
        print("VERDICT: erst Daten bereinigen, dann Routinen entfernen.")
    print("=" * 78)
def _purgeRagOrphans() -> int:
    """Delete every ``FileContentIndex`` row lacking both mandateId and featureInstanceId.

    Best-effort: a failing delete is reported on stderr and the loop
    continues with the remaining orphans.

    Returns:
        Number of rows actually deleted.
    """
    knowDb = _getKnowledgeDb()
    orphans = []
    for row in knowDb.getRecordset(FileContentIndex):
        if (row.get("mandateId") or "").strip():
            continue
        if (row.get("featureInstanceId") or "").strip():
            continue
        orphans.append(row)
    if not orphans:
        print("Keine RAG-Orphans gefunden -- nichts zu purgen.")
        return 0
    print(f"Purge {len(orphans)} RAG-Orphan(s):")
    deleted = 0
    for row in orphans:
        rid = row.get("id")
        try:
            knowDb.recordDelete(FileContentIndex, str(rid))
        except Exception as exc:
            print(f" FEHLER {rid}: {type(exc).__name__}: {exc}", file=sys.stderr)
        else:
            deleted += 1
            print(f" geloescht: {rid} {row.get('fileName')}")
    print(f"Purge abgeschlossen: {deleted}/{len(orphans)} geloescht.")
    return deleted
def main() -> int:
    """CLI entry point for the legacy-state audit.

    Returns an exit code: 0 = all checks GREEN, 1 = at least one RED,
    2 = script/DB error. With ``--purge-rag-orphans`` the destructive
    purge runs first, then the regular audit follows.
    """
    parser = argparse.ArgumentParser(
        description="Audit-Skript fuer Legacy-Bestaende (Bootstrap-Cleanup Plan C)"
    )
    parser.add_argument("--json", action="store_true", help="JSON-Output statt Text")
    parser.add_argument(
        "--purge-rag-orphans",
        action="store_true",
        help="WRITE: loescht FileContentIndex-Rows ohne mandateId UND featureInstanceId",
    )
    args = parser.parse_args()
    if args.purge_rag_orphans:
        # Destructive branch: only entered on explicit operator request.
        try:
            _purgeRagOrphans()
        except Exception as exc:
            print(f"FATAL: Purge fehlgeschlagen -- {type(exc).__name__}: {exc}", file=sys.stderr)
            return 2
        print()
    try:
        results = _runChecks()
    except Exception as exc:
        # _runChecks itself only raises on setup failures (e.g. DB unreachable);
        # individual check errors are captured as ERROR results instead.
        print(f"FATAL: konnte Audit nicht starten -- {type(exc).__name__}: {exc}", file=sys.stderr)
        return 2
    if args.json:
        # default=str keeps non-JSON-native values (UUIDs, datetimes) printable.
        print(json.dumps([r.toDict() for r in results], indent=2, default=str))
    else:
        _printText(results)
    # ERROR outranks RED: an incomplete audit must not look like a data problem.
    if any(r.status == "ERROR" for r in results):
        return 2
    if any(r.status == "RED" for r in results):
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())

View file

@ -1,133 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Unit tests for ``_migrateMandateNameLabelSlugRules`` in interfaceBootstrap.
Covers:
- legacy ``name``/``label`` rows get fixed (label fill, slug rename),
- collisions across legacy rows resolve via -2/-3 suffixes in stable id order,
- valid rows are left untouched (idempotency),
- second invocation is a no-op.
"""
from typing import Any, Dict, List, Optional
import pytest
from modules.datamodels.datamodelUam import Mandate
from modules.interfaces.interfaceBootstrap import _migrateMandateNameLabelSlugRules
from modules.shared.mandateNameUtils import isValidMandateName
class _FakeDb:
"""Minimal connector simulating getRecordset(Mandate)+recordModify(Mandate, id, data)."""
def __init__(self, rows: List[Dict[str, Any]]):
self.rows: List[Dict[str, Any]] = [dict(r) for r in rows]
self.modifyCalls: List[Dict[str, Any]] = []
def getRecordset(self, model, recordFilter: Optional[Dict[str, Any]] = None):
if model is not Mandate:
return []
if not recordFilter:
return [dict(r) for r in self.rows]
out = []
for r in self.rows:
if all(r.get(k) == v for k, v in recordFilter.items()):
out.append(dict(r))
return out
def recordModify(self, model, recordId: str, data: Dict[str, Any]):
self.modifyCalls.append({"id": str(recordId), "data": dict(data)})
for r in self.rows:
if str(r.get("id")) == str(recordId):
r.update(data)
return r
return None
def _row(mid: str, name: Any, label: Any = None) -> Dict[str, Any]:
return {"id": mid, "name": name, "label": label}
class TestMigrationFillsLabel:
def test_emptyLabelGetsNameAsLabel(self):
db = _FakeDb([_row("a1", "good-name", None)])
_migrateMandateNameLabelSlugRules(db)
assert db.rows[0]["label"] == "good-name"
assert db.rows[0]["name"] == "good-name"
def test_emptyLabelAndEmptyNameFallsBackToMandate(self):
db = _FakeDb([_row("a1", "", "")])
_migrateMandateNameLabelSlugRules(db)
assert db.rows[0]["label"] == "Mandate"
assert isValidMandateName(db.rows[0]["name"])
class TestMigrationRenamesInvalidNames:
def test_invalidNameGetsSlugFromLabel(self):
db = _FakeDb([_row("a1", "Home patrick", "Home Patrick")])
_migrateMandateNameLabelSlugRules(db)
assert db.rows[0]["name"] == "home-patrick"
assert db.rows[0]["label"] == "Home Patrick"
def test_umlautsTransliterated(self):
db = _FakeDb([_row("a1", "Müller AG", "Müller AG")])
_migrateMandateNameLabelSlugRules(db)
assert db.rows[0]["name"] == "mueller-ag"
class TestMigrationCollisions:
def test_collisionsResolveByStableIdOrder(self):
rows = [
_row("z1", "Home patrick", "Home Patrick"),
_row("a1", "home-patrick", "Home Patrick Two"),
]
db = _FakeDb(rows)
_migrateMandateNameLabelSlugRules(db)
byId = {r["id"]: r for r in db.rows}
assert byId["a1"]["name"] == "home-patrick"
assert byId["z1"]["name"] == "home-patrick-2"
def test_threeWayCollisionGetsThirdSuffix(self):
rows = [
_row("id-aaa", "home-patrick", "Home Patrick"),
_row("id-bbb", "Home patrick", "Home Patrick"),
_row("id-ccc", "home patrick", "Home Patrick"),
]
db = _FakeDb(rows)
_migrateMandateNameLabelSlugRules(db)
names = sorted(r["name"] for r in db.rows)
assert names == ["home-patrick", "home-patrick-2", "home-patrick-3"]
class TestMigrationIdempotency:
def test_secondRunIsNoop(self):
rows = [
_row("a1", "home-patrick", "Home Patrick"),
_row("b1", "Home Müller", ""),
]
db = _FakeDb(rows)
_migrateMandateNameLabelSlugRules(db)
assert all(isValidMandateName(r["name"]) for r in db.rows)
firstChanges = list(db.modifyCalls)
db.modifyCalls.clear()
_migrateMandateNameLabelSlugRules(db)
assert db.modifyCalls == [], (
f"expected no further changes after first migration, got {db.modifyCalls}; "
f"firstRun changes: {firstChanges}"
)
def test_validRowsLeftUntouched(self):
rows = [_row("a1", "root", "Root"), _row("b1", "alpina-treuhand", "Alpina Treuhand AG")]
db = _FakeDb(rows)
_migrateMandateNameLabelSlugRules(db)
assert db.modifyCalls == []
class TestMigrationEmpty:
def test_emptyDbDoesNothing(self):
db = _FakeDb([])
_migrateMandateNameLabelSlugRules(db)
assert db.modifyCalls == []

View file

@ -1,209 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Unit tests for the one-shot sysadmin role -> isPlatformAdmin migration.
Covers acceptance criteria from
``wiki/c-work/4-done/2026-04-sysadmin-authority-split.md``:
- AC#4 -> Existing sysadmin role-holders are promoted to ``isPlatformAdmin=True``
and the legacy role is removed (Role + UserMandateRole + AccessRules)
when the gateway boots.
- AC#10 -> The migration is idempotent and removes ALL artefacts (Role,
AccessRules, UserMandateRole) of the legacy ``sysadmin`` role.
Strategy: use an in-memory fake ``DatabaseConnector`` that records calls
and returns deterministic recordsets for ``Role``/``UserMandateRole``/
``UserMandate``/``UserInDB``/``AccessRule`` lookups.
"""
from __future__ import annotations
from typing import Any, Dict, List
from unittest.mock import Mock
from modules.interfaces.interfaceBootstrap import _migrateAndDropSysAdminRole
from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole
from modules.datamodels.datamodelRbac import AccessRule, Role
from modules.datamodels.datamodelUam import UserInDB
_ROOT_MANDATE_ID = "root-mandate-id"
_SYSADMIN_ROLE_ID = "sysadmin-role-id"
_USER_MANDATE_ID = "user-mandate-id"
_USER_ID = "legacy-user-id"
_UMR_ROW_ID = "umr-row-id"
_ACCESS_RULE_ID = "access-rule-id"
def _buildFakeDb(
*,
sysadminRoles: List[Dict[str, Any]],
umRoleRows: List[Dict[str, Any]],
userMandateRows: List[Dict[str, Any]],
users: List[Dict[str, Any]],
accessRules: List[Dict[str, Any]],
) -> Mock:
"""Build a fake ``DatabaseConnector`` that maps model -> recordset."""
deletes: List[tuple] = []
modifies: List[tuple] = []
def _getRecordset(model, recordFilter=None, **_): # noqa: ANN001
recordFilter = recordFilter or {}
if model is Role:
label = recordFilter.get("roleLabel")
mandateId = recordFilter.get("mandateId")
if label == "sysadmin" and mandateId == _ROOT_MANDATE_ID:
return list(sysadminRoles)
return []
if model is UserMandateRole:
wanted = recordFilter.get("roleId")
return [r for r in umRoleRows if r.get("roleId") == wanted]
if model is UserMandate:
wanted = recordFilter.get("id")
return [r for r in userMandateRows if r.get("id") == wanted]
if model is UserInDB:
wanted = recordFilter.get("id")
return [r for r in users if r.get("id") == wanted]
if model is AccessRule:
wanted = recordFilter.get("roleId")
return [r for r in accessRules if r.get("roleId") == wanted]
return []
def _recordModify(model, recordId, payload): # noqa: ANN001
modifies.append((model, recordId, payload))
# Reflect the change so a subsequent migration call is idempotent.
if model is UserInDB:
for u in users:
if u.get("id") == recordId:
u.update(payload)
return True
def _recordDelete(model, recordId): # noqa: ANN001
deletes.append((model, recordId))
if model is UserMandateRole:
umRoleRows[:] = [r for r in umRoleRows if r.get("id") != recordId]
elif model is AccessRule:
accessRules[:] = [r for r in accessRules if r.get("id") != recordId]
elif model is Role:
sysadminRoles[:] = [r for r in sysadminRoles if r.get("id") != recordId]
return True
db = Mock()
db.getRecordset = Mock(side_effect=_getRecordset)
db.recordModify = Mock(side_effect=_recordModify)
db.recordDelete = Mock(side_effect=_recordDelete)
db._modifies = modifies # exposed for assertions
db._deletes = deletes
return db
def _seed():
return {
"sysadminRoles": [{"id": _SYSADMIN_ROLE_ID, "roleLabel": "sysadmin",
"mandateId": _ROOT_MANDATE_ID}],
"umRoleRows": [{"id": _UMR_ROW_ID, "roleId": _SYSADMIN_ROLE_ID,
"userMandateId": _USER_MANDATE_ID}],
"userMandateRows": [{"id": _USER_MANDATE_ID, "userId": _USER_ID,
"mandateId": _ROOT_MANDATE_ID}],
"users": [{"id": _USER_ID, "username": "legacy",
"isSysAdmin": False, "isPlatformAdmin": False}],
"accessRules": [{"id": _ACCESS_RULE_ID, "roleId": _SYSADMIN_ROLE_ID}],
}
# ---------------------------------------------------------------------------
# AC #4 — promote + drop on first run
# ---------------------------------------------------------------------------
def testMigrationPromotesUserAndDropsArtefacts():
"""AC#4: legacy holder is promoted; Role+AccessRule+UMR are deleted."""
seed = _seed()
db = _buildFakeDb(**seed)
_migrateAndDropSysAdminRole(db, _ROOT_MANDATE_ID)
# User got isPlatformAdmin=True
assert seed["users"][0]["isPlatformAdmin"] is True
assert any(
m[0] is UserInDB and m[2] == {"isPlatformAdmin": True}
for m in db._modifies
), "Expected UserInDB.isPlatformAdmin promotion call"
# All three artefact tables had their rows deleted.
deletedModels = {m[0] for m in db._deletes}
assert UserMandateRole in deletedModels, "UserMandateRole row not deleted"
assert AccessRule in deletedModels, "AccessRule row not deleted"
assert Role in deletedModels, "Sysadmin Role record not deleted"
# And the seeded lists are empty after the migration.
assert seed["umRoleRows"] == []
assert seed["accessRules"] == []
assert seed["sysadminRoles"] == []
# ---------------------------------------------------------------------------
# AC #10 — idempotent: a second run is a no-op
# ---------------------------------------------------------------------------
def testMigrationIsIdempotent():
"""AC#10: a second invocation finds no sysadmin role and exits silently."""
seed = _seed()
db = _buildFakeDb(**seed)
_migrateAndDropSysAdminRole(db, _ROOT_MANDATE_ID)
firstModifies = list(db._modifies)
firstDeletes = list(db._deletes)
_migrateAndDropSysAdminRole(db, _ROOT_MANDATE_ID)
# No additional writes on the second call.
assert db._modifies == firstModifies, (
"Second migration call must not perform additional writes"
)
assert db._deletes == firstDeletes, (
"Second migration call must not perform additional deletes"
)
def testMigrationSkipsAlreadyPromotedUsers():
"""If a user already has ``isPlatformAdmin=True``, no redundant write."""
seed = _seed()
seed["users"][0]["isPlatformAdmin"] = True # already promoted
db = _buildFakeDb(**seed)
_migrateAndDropSysAdminRole(db, _ROOT_MANDATE_ID)
# No promotion write for an already-promoted user.
promotionWrites = [
m for m in db._modifies
if m[0] is UserInDB and m[2].get("isPlatformAdmin") is True
]
assert promotionWrites == [], (
"Should not re-write isPlatformAdmin if user already has it"
)
# But role + access-rule cleanup still happens.
deletedModels = {m[0] for m in db._deletes}
assert Role in deletedModels
assert AccessRule in deletedModels
assert UserMandateRole in deletedModels
def testMigrationOnEmptyDbIsNoop():
"""No legacy sysadmin role at all -> no calls, no errors."""
db = _buildFakeDb(
sysadminRoles=[],
umRoleRows=[],
userMandateRows=[],
users=[],
accessRules=[],
)
_migrateAndDropSysAdminRole(db, _ROOT_MANDATE_ID)
assert db._modifies == []
assert db._deletes == []

View file

@ -66,6 +66,17 @@ class TestResolveParameterReferences:
value = "Land: {{n1.country}}"
assert resolveParameterReferences(value, node_outputs) == "Land: CH"
    def test_legacy_string_template_loop_current_item_nested(self):
        """Same shape as executionEngine sets on loop node id during body iteration.

        Verifies that nested attribute paths under ``currentItem`` (one and
        two levels deep) resolve through the legacy ``{{node.path}}``
        string-template syntax.
        """
        node_outputs = {
            "loop93": {
                "currentItem": {"subject": "Hello", "body": {"content": "World"}},
                "currentIndex": 0,
            },
        }
        value = "Subj: {{loop93.currentItem.subject}} Body: {{loop93.currentItem.body.content}}"
        assert resolveParameterReferences(value, node_outputs) == "Subj: Hello Body: World"
class TestWildcardIteration:
"""Phase-4 typed Bindings-Resolver: ``*`` segment iterates over a list.