teamsbot
This commit is contained in:
parent
b6be8f391e
commit
794ba36f27
76 changed files with 8899 additions and 779 deletions
|
|
@ -37,7 +37,8 @@
|
||||||
"y": 200,
|
"y": 200,
|
||||||
"title": "Pro Scan-Dokument",
|
"title": "Pro Scan-Dokument",
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"level": 1,
|
"items": {"type": "ref", "nodeId": "n2", "path": ["files"]},
|
||||||
|
"level": "auto",
|
||||||
"concurrency": 1
|
"concurrency": 1
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -77,7 +77,7 @@ Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJK
|
||||||
# Teamsbot Browser Bot Service
|
# Teamsbot Browser Bot Service
|
||||||
# For local testing: run the bot locally with `npm run dev` in service-teams-browser-bot
|
# For local testing: run the bot locally with `npm run dev` in service-teams-browser-bot
|
||||||
# The bot will connect back to localhost:8000 via WebSocket
|
# The bot will connect back to localhost:8000 via WebSocket
|
||||||
TEAMSBOT_BROWSER_BOT_URL = https://cae-poweron-shared.redwater-53d21339.switzerlandnorth.azurecontainerapps.io
|
TEAMSBOT_BROWSER_BOT_URL = http://localhost:4100
|
||||||
|
|
||||||
# Debug Configuration
|
# Debug Configuration
|
||||||
APP_DEBUG_CHAT_WORKFLOW_ENABLED = True
|
APP_DEBUG_CHAT_WORKFLOW_ENABLED = True
|
||||||
|
|
|
||||||
|
|
@ -22,9 +22,24 @@ class WorkflowActionParameter(BaseModel):
|
||||||
json_schema_extra={"label": "Name"},
|
json_schema_extra={"label": "Name"},
|
||||||
)
|
)
|
||||||
type: str = Field(
|
type: str = Field(
|
||||||
description="Python type as string: 'str', 'int', 'bool', 'List[str]', etc.",
|
description=(
|
||||||
|
"Type reference. Either a primitive ('str', 'int', 'bool', 'float', 'Any', "
|
||||||
|
"'List[str]', 'Dict[str,Any]', …) or a PORT_TYPE_CATALOG schema name "
|
||||||
|
"(e.g. 'ConnectionRef', 'FeatureInstanceRef', 'DocumentList', "
|
||||||
|
"'TrusteeProcessResult'). Catalog types are validated by "
|
||||||
|
"_actionSignatureValidator at startup."
|
||||||
|
),
|
||||||
json_schema_extra={"label": "Typ"},
|
json_schema_extra={"label": "Typ"},
|
||||||
)
|
)
|
||||||
|
uiHint: Optional[str] = Field(
|
||||||
|
None,
|
||||||
|
description=(
|
||||||
|
"Optional UI rendering hint for adapters. "
|
||||||
|
"Free-form (e.g. 'textarea', 'cron', 'fieldBuilder'). "
|
||||||
|
"Adapters can override; defaults derive from frontendType when absent."
|
||||||
|
),
|
||||||
|
json_schema_extra={"label": "UI-Hinweis"},
|
||||||
|
)
|
||||||
frontendType: FrontendType = Field(
|
frontendType: FrontendType = Field(
|
||||||
description="UI rendering type (from global FrontendType enum)",
|
description="UI rendering type (from global FrontendType enum)",
|
||||||
json_schema_extra={"label": "Frontend-Typ"},
|
json_schema_extra={"label": "Frontend-Typ"},
|
||||||
|
|
@ -80,6 +95,16 @@ class WorkflowActionDefinition(BaseModel):
|
||||||
description="Parameter schema definitions",
|
description="Parameter schema definitions",
|
||||||
json_schema_extra={"label": "Parameter"},
|
json_schema_extra={"label": "Parameter"},
|
||||||
)
|
)
|
||||||
|
outputType: str = Field(
|
||||||
|
"ActionResult",
|
||||||
|
description=(
|
||||||
|
"PORT_TYPE_CATALOG schema name produced by this action "
|
||||||
|
"(e.g. 'TrusteeProcessResult', 'EmailDraft', 'DocumentList'). "
|
||||||
|
"Defaults to 'ActionResult' for fire-and-forget actions. "
|
||||||
|
"Validated by _actionSignatureValidator at startup."
|
||||||
|
),
|
||||||
|
json_schema_extra={"label": "Ausgabe-Typ"},
|
||||||
|
)
|
||||||
execute: Optional[Callable] = Field(
|
execute: Optional[Callable] = Field(
|
||||||
None,
|
None,
|
||||||
description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically.",
|
description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically.",
|
||||||
|
|
|
||||||
205
modules/features/graphicalEditor/adapterValidator.py
Normal file
205
modules/features/graphicalEditor/adapterValidator.py
Normal file
|
|
@ -0,0 +1,205 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Adapter Validator — enforces 5 drift rules between Schicht-3 NodeAdapters
|
||||||
|
and the Schicht-2 Actions they bind to.
|
||||||
|
|
||||||
|
This is the CI-safety net described in the typed-action-architecture plan:
|
||||||
|
any drift between an Editor-Node Adapter and the underlying Action signature
|
||||||
|
must be caught at build time, never silently in production.
|
||||||
|
|
||||||
|
Rules
|
||||||
|
-----
|
||||||
|
1. Every `userParams[].actionArg` exists as a parameter in the bound Action.
|
||||||
|
2. Every required Action parameter is covered by either `userParams` or
|
||||||
|
`contextParams` (i.e. no required arg is silently unset).
|
||||||
|
3. Every Action parameter type exists in PORT_TYPE_CATALOG (or is a primitive).
|
||||||
|
4. The Action `outputType` exists in PORT_TYPE_CATALOG (or is a primitive).
|
||||||
|
5. Every method-bound STATIC node has an Adapter (no orphan node ids).
|
||||||
|
|
||||||
|
Rules 3+4 are already enforced by `_actionSignatureValidator` in Phase 2 —
|
||||||
|
this module composes with it so the report covers both layers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Any, Dict, List, Mapping
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.nodeAdapter import (
|
||||||
|
NodeAdapter,
|
||||||
|
_adapterFromLegacyNode,
|
||||||
|
_isMethodBoundNode,
|
||||||
|
)
|
||||||
|
from modules.workflows.methods._actionSignatureValidator import _validateTypeRef
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class AdapterValidationReport:
|
||||||
|
"""Aggregated drift report across all adapters."""
|
||||||
|
|
||||||
|
errors: List[str] = field(default_factory=list)
|
||||||
|
warnings: List[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def isHealthy(self) -> bool:
|
||||||
|
return not self.errors
|
||||||
|
|
||||||
|
def merge(self, other: "AdapterValidationReport") -> None:
|
||||||
|
self.errors.extend(other.errors)
|
||||||
|
self.warnings.extend(other.warnings)
|
||||||
|
|
||||||
|
|
||||||
|
def _validateAdapterAgainstAction(
|
||||||
|
adapter: NodeAdapter,
|
||||||
|
actionDef: Any,
|
||||||
|
) -> AdapterValidationReport:
|
||||||
|
"""Apply rules 1-4 to a single Adapter / Action pair.
|
||||||
|
|
||||||
|
`actionDef` is duck-typed so tests can pass dataclasses; production passes
|
||||||
|
a `WorkflowActionDefinition` Pydantic model.
|
||||||
|
"""
|
||||||
|
report = AdapterValidationReport()
|
||||||
|
actionParams: Mapping[str, Any] = getattr(actionDef, "parameters", {}) or {}
|
||||||
|
outputType: str = getattr(actionDef, "outputType", "ActionResult") or "ActionResult"
|
||||||
|
|
||||||
|
# Rule 1: every userParam.actionArg exists in the Action
|
||||||
|
declaredArgs = {up.actionArg for up in adapter.userParams}
|
||||||
|
for arg in declaredArgs:
|
||||||
|
if arg not in actionParams:
|
||||||
|
report.errors.append(
|
||||||
|
f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
|
||||||
|
f"userParams.actionArg '{arg}' does not exist in action parameters "
|
||||||
|
f"(known: {sorted(actionParams.keys())})"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Rule 2: every required Action arg is covered (userParams OR contextParams)
|
||||||
|
coveredArgs = declaredArgs | set(adapter.contextParams.keys())
|
||||||
|
for paramName, paramDef in actionParams.items():
|
||||||
|
isRequired = bool(getattr(paramDef, "required", False))
|
||||||
|
if isRequired and paramName not in coveredArgs:
|
||||||
|
report.errors.append(
|
||||||
|
f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
|
||||||
|
f"required action arg '{paramName}' is neither in userParams nor contextParams"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Rule 3: every Action parameter type exists in catalog (re-runs Phase-2 rule)
|
||||||
|
for paramName, paramDef in actionParams.items():
|
||||||
|
typeRef = getattr(paramDef, "type", None)
|
||||||
|
if not typeRef:
|
||||||
|
report.errors.append(
|
||||||
|
f"action '{adapter.bindsAction}.{paramName}': missing 'type' on parameter"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
for err in _validateTypeRef(typeRef):
|
||||||
|
report.errors.append(
|
||||||
|
f"action '{adapter.bindsAction}.{paramName}': {err}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Rule 4: Action outputType exists in catalog (or is a generic fire-and-forget type)
|
||||||
|
if outputType not in {"ActionResult", "Transit"}:
|
||||||
|
for err in _validateTypeRef(outputType):
|
||||||
|
report.errors.append(
|
||||||
|
f"action '{adapter.bindsAction}'.outputType: {err}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return report
|
||||||
|
|
||||||
|
|
||||||
|
def _validateAllAdapters(
|
||||||
|
staticNodes: List[Mapping[str, Any]],
|
||||||
|
actionsRegistry: Mapping[str, Mapping[str, Any]],
|
||||||
|
) -> AdapterValidationReport:
|
||||||
|
"""Run rules 1-5 across all method-bound static node definitions.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
staticNodes: list of legacy node-dicts (`STATIC_NODE_TYPES`).
|
||||||
|
actionsRegistry: mapping of method-shortname -> {actionName: WorkflowActionDefinition}.
|
||||||
|
Built from live `methods` registry or test-stubbed methods.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Aggregated drift report. `isHealthy` is True only if every method-bound
|
||||||
|
node has a matching Action and all 5 rules pass.
|
||||||
|
"""
|
||||||
|
report = AdapterValidationReport()
|
||||||
|
seenAdapterIds: set[str] = set()
|
||||||
|
|
||||||
|
for node in staticNodes:
|
||||||
|
if not _isMethodBoundNode(node):
|
||||||
|
continue
|
||||||
|
|
||||||
|
adapter = _adapterFromLegacyNode(node)
|
||||||
|
if adapter is None:
|
||||||
|
report.errors.append(
|
||||||
|
f"node '{node.get('id')}' is method-bound but adapter projection failed"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
seenAdapterIds.add(adapter.nodeId)
|
||||||
|
|
||||||
|
methodName = str(node.get("_method") or "")
|
||||||
|
actionName = str(node.get("_action") or "")
|
||||||
|
methodActions = actionsRegistry.get(methodName) or {}
|
||||||
|
actionDef = methodActions.get(actionName)
|
||||||
|
if actionDef is None:
|
||||||
|
report.errors.append(
|
||||||
|
f"adapter '{adapter.nodeId}' bindsAction '{adapter.bindsAction}': "
|
||||||
|
f"action not found in registry (method '{methodName}' has actions: "
|
||||||
|
f"{sorted(methodActions.keys())})"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
report.merge(_validateAdapterAgainstAction(adapter, actionDef))
|
||||||
|
|
||||||
|
# Rule 5: every Action with dynamicMode=False MUST have an Editor Adapter.
|
||||||
|
# dynamicMode=True actions are agent-only and may legitimately lack one.
|
||||||
|
boundActions: set[str] = set()
|
||||||
|
for node in staticNodes:
|
||||||
|
if not _isMethodBoundNode(node):
|
||||||
|
continue
|
||||||
|
boundActions.add(f"{node.get('_method')}.{node.get('_action')}")
|
||||||
|
|
||||||
|
for methodName, actions in actionsRegistry.items():
|
||||||
|
for actionName, actionDef in actions.items():
|
||||||
|
if bool(getattr(actionDef, "dynamicMode", False)):
|
||||||
|
continue
|
||||||
|
fqn = f"{methodName}.{actionName}"
|
||||||
|
if fqn not in boundActions:
|
||||||
|
report.warnings.append(
|
||||||
|
f"action '{fqn}' has no Editor adapter "
|
||||||
|
f"(set dynamicMode=True if intended as agent-only)"
|
||||||
|
)
|
||||||
|
|
||||||
|
return report
|
||||||
|
|
||||||
|
|
||||||
|
def _formatAdapterReport(report: AdapterValidationReport) -> str:
|
||||||
|
"""Format a report for human-readable logging."""
|
||||||
|
lines: List[str] = []
|
||||||
|
if report.isHealthy and not report.warnings:
|
||||||
|
lines.append("Adapter validator: all healthy.")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
if report.errors:
|
||||||
|
lines.append(f"Adapter validator: {len(report.errors)} ERROR(s)")
|
||||||
|
for e in report.errors:
|
||||||
|
lines.append(f" ERROR: {e}")
|
||||||
|
if report.warnings:
|
||||||
|
lines.append(f"Adapter validator: {len(report.warnings)} WARNING(s)")
|
||||||
|
for w in report.warnings:
|
||||||
|
lines.append(f" WARN: {w}")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def _buildActionsRegistryFromMethods(
|
||||||
|
methodInstances: Mapping[str, Any],
|
||||||
|
) -> Dict[str, Dict[str, Any]]:
|
||||||
|
"""Convenience: turn `{shortName: methodInstance}` into the registry shape.
|
||||||
|
|
||||||
|
`methodInstance._actions` is a dict of action-name -> WorkflowActionDefinition.
|
||||||
|
"""
|
||||||
|
registry: Dict[str, Dict[str, Any]] = {}
|
||||||
|
for shortName, instance in methodInstances.items():
|
||||||
|
actions = getattr(instance, "_actions", None)
|
||||||
|
if isinstance(actions, dict):
|
||||||
|
registry[shortName] = dict(actions)
|
||||||
|
return registry
|
||||||
172
modules/features/graphicalEditor/nodeAdapter.py
Normal file
172
modules/features/graphicalEditor/nodeAdapter.py
Normal file
|
|
@ -0,0 +1,172 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Schicht-3 Adapter Layer — projects Schicht-2 Actions into Editor-Node form.
|
||||||
|
|
||||||
|
Architecture (see wiki/c-work/1-plan/2026-04-typed-action-architecture.md):
|
||||||
|
- Schicht 1: Types Catalog (portTypes.PORT_TYPE_CATALOG)
|
||||||
|
- Schicht 2: Methods/Actions (modules/workflows/methods/method*) - source of truth
|
||||||
|
for Backend capabilities (parameter types, output types).
|
||||||
|
- Schicht 3: Adapters (this module) - Editor-Node + AI-Agent-Tool wrappers around
|
||||||
|
Actions. References Action signature, never duplicates types.
|
||||||
|
- Schicht 4: Workflow-Bindings + Agent-Tool-Calls (instance-level wiring).
|
||||||
|
|
||||||
|
This module defines the in-code Adapter representation (NodeAdapter,
|
||||||
|
UserParamMapping) and the projection helpers that convert between the
|
||||||
|
legacy node-dict wire format and the typed Adapter view.
|
||||||
|
|
||||||
|
Wire-format compatibility: the legacy dicts in nodeDefinitions/*.py remain
|
||||||
|
the wire format consumed by the frontend until Phase 4. This module exposes
|
||||||
|
an Adapter VIEW over those dicts so the validator and AI-tool generator can
|
||||||
|
operate on a clean, typed structure without breaking consumers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Any, Dict, List, Mapping, Optional
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
class UserParamMapping:
|
||||||
|
"""Maps an Action argument into a Node's user-facing parameter.
|
||||||
|
|
||||||
|
The Action signature is the source of truth for type/required/description.
|
||||||
|
This mapping carries Editor-specific overrides (label, UI hints, conditional
|
||||||
|
visibility) but never re-declares the type.
|
||||||
|
"""
|
||||||
|
|
||||||
|
actionArg: str
|
||||||
|
label: Optional[Any] = None
|
||||||
|
description: Optional[Any] = None
|
||||||
|
uiHint: Optional[str] = None
|
||||||
|
frontendOptions: Optional[Any] = None
|
||||||
|
visibleWhen: Optional[Dict[str, Any]] = None
|
||||||
|
defaultValue: Any = None
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
class NodeAdapter:
|
||||||
|
"""Schicht-3 Editor-Node adapter — binds to a Schicht-2 Action.
|
||||||
|
|
||||||
|
All type information for `userParams` is inherited from the bound Action.
|
||||||
|
The adapter only carries Editor-specific concerns (UI labels, port topology,
|
||||||
|
icon/color metadata).
|
||||||
|
"""
|
||||||
|
|
||||||
|
nodeId: str
|
||||||
|
bindsAction: str
|
||||||
|
category: str
|
||||||
|
label: Any
|
||||||
|
description: Any
|
||||||
|
userParams: List[UserParamMapping] = field(default_factory=list)
|
||||||
|
contextParams: Dict[str, str] = field(default_factory=dict)
|
||||||
|
inputs: int = 1
|
||||||
|
outputs: int = 1
|
||||||
|
inputAccepts: List[List[str]] = field(default_factory=list)
|
||||||
|
outputLabels: Optional[List[Any]] = None
|
||||||
|
meta: Dict[str, Any] = field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
|
def _isMethodBoundNode(node: Mapping[str, Any]) -> bool:
|
||||||
|
"""True if a legacy node dict is bound to a Schicht-2 Action."""
|
||||||
|
return bool(node.get("_method") and node.get("_action"))
|
||||||
|
|
||||||
|
|
||||||
|
def _bindsActionFromLegacy(node: Mapping[str, Any]) -> Optional[str]:
|
||||||
|
"""Build the canonical 'method.action' identifier from a legacy node dict.
|
||||||
|
|
||||||
|
Returns None for framework-primitive nodes (trigger/flow/input/data).
|
||||||
|
"""
|
||||||
|
method = node.get("_method")
|
||||||
|
action = node.get("_action")
|
||||||
|
if not method or not action:
|
||||||
|
return None
|
||||||
|
return f"{method}.{action}"
|
||||||
|
|
||||||
|
|
||||||
|
def _userParamFromLegacyParam(legacyParam: Mapping[str, Any]) -> UserParamMapping:
|
||||||
|
"""Project a legacy parameter dict into a UserParamMapping view.
|
||||||
|
|
||||||
|
The view carries only Editor-overrides; type/required come from the Action.
|
||||||
|
"""
|
||||||
|
return UserParamMapping(
|
||||||
|
actionArg=str(legacyParam.get("name", "")),
|
||||||
|
label=legacyParam.get("label"),
|
||||||
|
description=legacyParam.get("description"),
|
||||||
|
uiHint=legacyParam.get("frontendType"),
|
||||||
|
frontendOptions=legacyParam.get("frontendOptions"),
|
||||||
|
visibleWhen=_extractVisibleWhen(legacyParam.get("frontendOptions")),
|
||||||
|
defaultValue=legacyParam.get("default"),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _extractVisibleWhen(frontendOptions: Any) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Extract conditional-visibility hint from legacy frontendOptions.showWhen."""
|
||||||
|
if not isinstance(frontendOptions, dict):
|
||||||
|
return None
|
||||||
|
dependsOn = frontendOptions.get("dependsOn")
|
||||||
|
showWhen = frontendOptions.get("showWhen")
|
||||||
|
if not dependsOn or not showWhen:
|
||||||
|
return None
|
||||||
|
return {"actionArg": str(dependsOn), "in": list(showWhen) if isinstance(showWhen, (list, tuple)) else [showWhen]}
|
||||||
|
|
||||||
|
|
||||||
|
def _adapterFromLegacyNode(node: Mapping[str, Any]) -> Optional[NodeAdapter]:
|
||||||
|
"""Build a NodeAdapter view from a legacy node dict.
|
||||||
|
|
||||||
|
Returns None for framework-primitive nodes (no _method/_action binding).
|
||||||
|
Pure projection — no validation, no Action-signature lookup.
|
||||||
|
"""
|
||||||
|
if not _isMethodBoundNode(node):
|
||||||
|
return None
|
||||||
|
|
||||||
|
bindsAction = _bindsActionFromLegacy(node)
|
||||||
|
if not bindsAction:
|
||||||
|
return None
|
||||||
|
|
||||||
|
inputAccepts = _projectInputAccepts(node)
|
||||||
|
|
||||||
|
return NodeAdapter(
|
||||||
|
nodeId=str(node.get("id", "")),
|
||||||
|
bindsAction=bindsAction,
|
||||||
|
category=str(node.get("category", "")),
|
||||||
|
label=node.get("label", ""),
|
||||||
|
description=node.get("description", ""),
|
||||||
|
userParams=[_userParamFromLegacyParam(p) for p in (node.get("parameters") or [])],
|
||||||
|
contextParams={},
|
||||||
|
inputs=int(node.get("inputs", 1)),
|
||||||
|
outputs=int(node.get("outputs", 1)),
|
||||||
|
inputAccepts=inputAccepts,
|
||||||
|
outputLabels=node.get("outputLabels"),
|
||||||
|
meta=dict(node.get("meta") or {}),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _projectInputAccepts(node: Mapping[str, Any]) -> List[List[str]]:
|
||||||
|
"""Convert legacy `inputPorts` dict-of-dicts into a per-port `accepts` list."""
|
||||||
|
inputPorts = node.get("inputPorts") or {}
|
||||||
|
if not isinstance(inputPorts, dict):
|
||||||
|
return []
|
||||||
|
inputs = int(node.get("inputs", 0) or 0)
|
||||||
|
if inputs <= 0:
|
||||||
|
return []
|
||||||
|
out: List[List[str]] = []
|
||||||
|
for portIdx in range(inputs):
|
||||||
|
portCfg = inputPorts.get(portIdx) or inputPorts.get(str(portIdx)) or {}
|
||||||
|
accepts = portCfg.get("accepts") if isinstance(portCfg, dict) else None
|
||||||
|
out.append(list(accepts) if isinstance(accepts, (list, tuple)) else [])
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def _projectAllAdapters(staticNodes: List[Mapping[str, Any]]) -> Dict[str, NodeAdapter]:
|
||||||
|
"""Project a list of legacy node dicts into a {nodeId: NodeAdapter} map.
|
||||||
|
|
||||||
|
Framework-primitive nodes (no Action binding) are silently skipped.
|
||||||
|
"""
|
||||||
|
out: Dict[str, NodeAdapter] = {}
|
||||||
|
for node in staticNodes:
|
||||||
|
adapter = _adapterFromLegacyNode(node)
|
||||||
|
if adapter is not None:
|
||||||
|
out[adapter.nodeId] = adapter
|
||||||
|
return out
|
||||||
|
|
@ -12,19 +12,19 @@ AI_NODES = [
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
|
{"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
|
||||||
"description": t("KI-Prompt")},
|
"description": t("KI-Prompt")},
|
||||||
{"name": "outputFormat", "type": "string", "required": False, "frontendType": "select",
|
{"name": "resultType", "type": "string", "required": False, "frontendType": "select",
|
||||||
"frontendOptions": {"options": ["text", "json", "emailDraft"]},
|
"frontendOptions": {"options": ["txt", "json", "md", "csv", "xml", "html", "pdf", "docx", "xlsx", "pptx", "png", "jpg"]},
|
||||||
"description": t("Ausgabeformat"), "default": "text"},
|
"description": t("Ausgabeformat"), "default": "txt"},
|
||||||
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
|
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
|
||||||
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
||||||
{"name": "context", "type": "string", "required": False, "frontendType": "hidden",
|
|
||||||
"description": t("Kontext-Daten (via Wire oder DataRef)"), "default": ""},
|
|
||||||
{"name": "simpleMode", "type": "boolean", "required": False, "frontendType": "checkbox",
|
{"name": "simpleMode", "type": "boolean", "required": False, "frontendType": "checkbox",
|
||||||
"description": t("Einfacher Modus"), "default": True},
|
"description": t("Einfacher Modus"), "default": True},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["DocumentList", "AiResult", "TextResult", "Transit"]}},
|
"inputPorts": {0: {"accepts": [
|
||||||
|
"DocumentList", "AiResult", "TextResult", "Transit", "LoopItem", "ActionResult",
|
||||||
|
]}},
|
||||||
"outputPorts": {0: {"schema": "AiResult"}},
|
"outputPorts": {0: {"schema": "AiResult"}},
|
||||||
"meta": {"icon": "mdi-robot", "color": "#9C27B0", "usesAi": True},
|
"meta": {"icon": "mdi-robot", "color": "#9C27B0", "usesAi": True},
|
||||||
"_method": "ai",
|
"_method": "ai",
|
||||||
|
|
@ -53,9 +53,11 @@ AI_NODES = [
|
||||||
"label": t("Dokument zusammenfassen"),
|
"label": t("Dokument zusammenfassen"),
|
||||||
"description": t("Dokumentinhalt zusammenfassen"),
|
"description": t("Dokumentinhalt zusammenfassen"),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
|
{"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
|
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
||||||
{"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
|
{"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
|
||||||
"frontendOptions": {"options": ["short", "medium", "long"]},
|
"frontendOptions": {"options": ["brief", "medium", "detailed"]},
|
||||||
"description": t("Kurz, mittel oder lang"), "default": "medium"},
|
"description": t("Kurz, mittel oder ausführlich"), "default": "medium"},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -71,9 +73,10 @@ AI_NODES = [
|
||||||
"label": t("Dokument übersetzen"),
|
"label": t("Dokument übersetzen"),
|
||||||
"description": t("Dokument in Zielsprache übersetzen"),
|
"description": t("Dokument in Zielsprache übersetzen"),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select",
|
{"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
"frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]},
|
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
||||||
"description": t("Zielsprache")},
|
{"name": "targetLanguage", "type": "string", "required": True, "frontendType": "text",
|
||||||
|
"description": t("Zielsprache (z.B. de, en, French)")},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -89,8 +92,10 @@ AI_NODES = [
|
||||||
"label": t("Dokument konvertieren"),
|
"label": t("Dokument konvertieren"),
|
||||||
"description": t("Dokument in anderes Format konvertieren"),
|
"description": t("Dokument in anderes Format konvertieren"),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
|
{"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
|
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
||||||
{"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
|
{"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
|
||||||
"frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]},
|
"frontendOptions": {"options": ["docx", "pdf", "xlsx", "csv", "txt", "html", "json", "md"]},
|
||||||
"description": t("Zielformat")},
|
"description": t("Zielformat")},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
|
|
@ -126,9 +131,9 @@ AI_NODES = [
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
|
{"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
|
||||||
"description": t("Code-Generierungs-Prompt")},
|
"description": t("Code-Generierungs-Prompt")},
|
||||||
{"name": "language", "type": "string", "required": False, "frontendType": "select",
|
{"name": "resultType", "type": "string", "required": False, "frontendType": "select",
|
||||||
"frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]},
|
"frontendOptions": {"options": ["py", "js", "ts", "html", "java", "cpp", "txt", "json", "csv", "xml"]},
|
||||||
"description": t("Programmiersprache"), "default": "python"},
|
"description": t("Datei-Endung der erzeugten Code-Datei"), "default": "py"},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
|
||||||
|
|
@ -94,8 +94,6 @@ CLICKUP_NODES = [
|
||||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||||
"frontendOptions": {"authority": "clickup"},
|
"frontendOptions": {"authority": "clickup"},
|
||||||
"description": t("ClickUp-Verbindung")},
|
"description": t("ClickUp-Verbindung")},
|
||||||
{"name": "teamId", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Workspace")},
|
|
||||||
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
|
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
|
||||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||||
"description": t("Pfad zur Liste")},
|
"description": t("Pfad zur Liste")},
|
||||||
|
|
@ -144,10 +142,8 @@ CLICKUP_NODES = [
|
||||||
"description": t("Task-ID")},
|
"description": t("Task-ID")},
|
||||||
{"name": "path", "type": "string", "required": False, "frontendType": "text",
|
{"name": "path", "type": "string", "required": False, "frontendType": "text",
|
||||||
"description": t("Oder Pfad")},
|
"description": t("Oder Pfad")},
|
||||||
{"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows",
|
|
||||||
"description": t("Zu ändernde Felder")},
|
|
||||||
{"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
|
{"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
|
||||||
"description": t("JSON für API")},
|
"description": t("JSON-Body für PUT /task/{id}, z.B. {\"name\":\"...\",\"status\":\"...\"}")},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -172,6 +168,8 @@ CLICKUP_NODES = [
|
||||||
"description": t("Oder Pfad")},
|
"description": t("Oder Pfad")},
|
||||||
{"name": "fileName", "type": "string", "required": False, "frontendType": "text",
|
{"name": "fileName", "type": "string", "required": False, "frontendType": "text",
|
||||||
"description": t("Dateiname")},
|
"description": t("Dateiname")},
|
||||||
|
{"name": "content", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
|
"description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
|
||||||
|
|
@ -10,14 +10,13 @@ CONTEXT_NODES = [
|
||||||
"label": t("Inhalt extrahieren"),
|
"label": t("Inhalt extrahieren"),
|
||||||
"description": t("Dokumentstruktur extrahieren ohne KI (Seiten, Abschnitte, Bilder, Tabellen)"),
|
"description": t("Dokumentstruktur extrahieren ohne KI (Seiten, Abschnitte, Bilder, Tabellen)"),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "outputDetail", "type": "string", "required": False, "frontendType": "select",
|
{"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
"frontendOptions": {"options": ["full", "structure", "references"]},
|
"description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
|
||||||
"description": t("Detailgrad: full = alles, structure = Skelett, references = Dateireferenzen"),
|
{"name": "extractionOptions", "type": "object", "required": False, "frontendType": "json",
|
||||||
"default": "full"},
|
"description": t(
|
||||||
{"name": "includeImages", "type": "boolean", "required": False, "frontendType": "checkbox",
|
"Extraktions-Optionen (JSON), z.B. {\"includeImages\": true, \"includeTables\": true, "
|
||||||
"description": t("Bilder extrahieren"), "default": True},
|
"\"outputDetail\": \"full\"}"),
|
||||||
{"name": "includeTables", "type": "boolean", "required": False, "frontendType": "checkbox",
|
"default": {}},
|
||||||
"description": t("Tabellen extrahieren"), "default": True},
|
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
|
||||||
|
|
@ -16,27 +16,11 @@ DATA_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["Transit"]}},
|
"inputPorts": {0: {"accepts": ["Transit", "AiResult", "LoopItem"]}},
|
||||||
"outputPorts": {0: {"schema": "AggregateResult"}},
|
"outputPorts": {0: {"schema": "AggregateResult"}},
|
||||||
"executor": "data",
|
"executor": "data",
|
||||||
"meta": {"icon": "mdi-playlist-plus", "color": "#607D8B", "usesAi": False},
|
"meta": {"icon": "mdi-playlist-plus", "color": "#607D8B", "usesAi": False},
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"id": "data.transform",
|
|
||||||
"category": "data",
|
|
||||||
"label": t("Umwandeln"),
|
|
||||||
"description": t("Daten umstrukturieren"),
|
|
||||||
"parameters": [
|
|
||||||
{"name": "mappings", "type": "json", "required": True, "frontendType": "mappingTable",
|
|
||||||
"description": t("Feld-Zuordnungen"), "default": []},
|
|
||||||
],
|
|
||||||
"inputs": 1,
|
|
||||||
"outputs": 1,
|
|
||||||
"inputPorts": {0: {"accepts": ["Transit"]}},
|
|
||||||
"outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}},
|
|
||||||
"executor": "data",
|
|
||||||
"meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B", "usesAi": False},
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"id": "data.filter",
|
"id": "data.filter",
|
||||||
"category": "data",
|
"category": "data",
|
||||||
|
|
|
||||||
|
|
@ -17,14 +17,8 @@ EMAIL_NODES = [
|
||||||
"description": t("Ordner"), "default": "Inbox"},
|
"description": t("Ordner"), "default": "Inbox"},
|
||||||
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
|
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
|
||||||
"description": t("Max E-Mails"), "default": 100},
|
"description": t("Max E-Mails"), "default": 100},
|
||||||
{"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Nur von dieser Adresse"), "default": ""},
|
|
||||||
{"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Betreff muss enthalten"), "default": ""},
|
|
||||||
{"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
|
|
||||||
"description": t("Nur mit Anhängen"), "default": False},
|
|
||||||
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
|
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
|
||||||
"description": t("Erweitert: Filter-Text"), "default": ""},
|
"description": t("Filter-Ausdruck (z.B. 'from:max@example.com hasAttachment:true betreff')"), "default": ""},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -43,24 +37,12 @@ EMAIL_NODES = [
|
||||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||||
"frontendOptions": {"authority": "msft"},
|
"frontendOptions": {"authority": "msft"},
|
||||||
"description": t("E-Mail-Konto Verbindung")},
|
"description": t("E-Mail-Konto Verbindung")},
|
||||||
{"name": "query", "type": "string", "required": False, "frontendType": "text",
|
{"name": "query", "type": "string", "required": True, "frontendType": "text",
|
||||||
"description": t("Suchbegriff"), "default": ""},
|
"description": t("Suchausdruck (z.B. 'from:max@example.com hasAttachments:true Rechnung')")},
|
||||||
{"name": "folder", "type": "string", "required": False, "frontendType": "text",
|
{"name": "folder", "type": "string", "required": False, "frontendType": "text",
|
||||||
"description": t("Ordner"), "default": "Inbox"},
|
"description": t("Ordner"), "default": "All"},
|
||||||
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
|
{"name": "limit", "type": "number", "required": False, "frontendType": "number",
|
||||||
"description": t("Max E-Mails"), "default": 100},
|
"description": t("Max E-Mails"), "default": 100},
|
||||||
{"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Von Adresse"), "default": ""},
|
|
||||||
{"name": "toAddress", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("An Adresse"), "default": ""},
|
|
||||||
{"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Betreff enthält"), "default": ""},
|
|
||||||
{"name": "bodyContains", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Inhalt enthält"), "default": ""},
|
|
||||||
{"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
|
|
||||||
"description": t("Mit Anhängen"), "default": False},
|
|
||||||
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
|
|
||||||
"description": t("Erweitert: KQL-Filter"), "default": ""},
|
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -74,22 +56,24 @@ EMAIL_NODES = [
|
||||||
"id": "email.draftEmail",
|
"id": "email.draftEmail",
|
||||||
"category": "email",
|
"category": "email",
|
||||||
"label": t("E-Mail entwerfen"),
|
"label": t("E-Mail entwerfen"),
|
||||||
"description": t("E-Mail-Entwurf erstellen"),
|
"description": t(
|
||||||
|
"AI-gestützt einen E-Mail-Entwurf aus Kontext und optionalen Dokumenten erstellen"),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
|
||||||
"frontendOptions": {"authority": "msft"},
|
"frontendOptions": {"authority": "msft"},
|
||||||
"description": t("E-Mail-Konto")},
|
"description": t("E-Mail-Konto")},
|
||||||
{"name": "subject", "type": "string", "required": True, "frontendType": "text",
|
{"name": "context", "type": "string", "required": False, "frontendType": "textarea",
|
||||||
"description": t("Betreff")},
|
"description": t("Kontext / Brief-Beschreibung für die KI-Komposition"), "default": ""},
|
||||||
{"name": "body", "type": "string", "required": True, "frontendType": "textarea",
|
|
||||||
"description": t("Inhalt")},
|
|
||||||
{"name": "to", "type": "string", "required": False, "frontendType": "text",
|
{"name": "to", "type": "string", "required": False, "frontendType": "text",
|
||||||
"description": t("Empfänger"), "default": ""},
|
"description": t("Empfänger (komma-separiert, optional für Entwurf)"), "default": ""},
|
||||||
{"name": "attachments", "type": "json", "required": False, "frontendType": "attachmentBuilder",
|
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
|
||||||
"description": t(
|
"description": t("Anhang-Dokumente (via Wire oder DataRef)"), "default": ""},
|
||||||
"Anhänge: Liste von { contentRef | csvFromVariable | base64Content, name, mimeType }. "
|
{"name": "emailContent", "type": "string", "required": False, "frontendType": "hidden",
|
||||||
"Per Wire befüllbar (z.B. CSV aus data.consolidate)."),
|
"description": t("Direkt vorbereiteter Inhalt {subject, body, to} (via Wire — überspringt KI)"),
|
||||||
"default": []},
|
"default": ""},
|
||||||
|
{"name": "emailStyle", "type": "string", "required": False, "frontendType": "select",
|
||||||
|
"frontendOptions": {"options": ["formal", "casual", "business"]},
|
||||||
|
"description": t("Stil"), "default": "business"},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
|
||||||
|
|
@ -88,7 +88,9 @@ FLOW_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["Transit", "UdmDocument"]}},
|
"inputPorts": {0: {"accepts": [
|
||||||
|
"Transit", "UdmDocument", "EmailList", "DocumentList", "FileList", "TaskList", "ActionResult",
|
||||||
|
]}},
|
||||||
"outputPorts": {0: {"schema": "LoopItem"}},
|
"outputPorts": {0: {"schema": "LoopItem"}},
|
||||||
"executor": "flow",
|
"executor": "flow",
|
||||||
"meta": {"icon": "mdi-repeat", "color": "#FF9800", "usesAi": False},
|
"meta": {"icon": "mdi-repeat", "color": "#FF9800", "usesAi": False},
|
||||||
|
|
|
||||||
|
|
@ -22,7 +22,7 @@ INPUT_NODES = [
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["Transit"]}},
|
"inputPorts": {0: {"accepts": ["Transit"]}},
|
||||||
"outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}},
|
"outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "fields"}}},
|
||||||
"executor": "input",
|
"executor": "input",
|
||||||
"meta": {"icon": "mdi-form-textbox", "color": "#9C27B0", "usesAi": False},
|
"meta": {"icon": "mdi-form-textbox", "color": "#9C27B0", "usesAi": False},
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -43,7 +43,7 @@ SHAREPOINT_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
|
"inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}},
|
||||||
"outputPorts": {0: {"schema": "DocumentList"}},
|
"outputPorts": {0: {"schema": "DocumentList"}},
|
||||||
"meta": {"icon": "mdi-file-document", "color": "#0078D4", "usesAi": False},
|
"meta": {"icon": "mdi-file-document", "color": "#0078D4", "usesAi": False},
|
||||||
"_method": "sharepoint",
|
"_method": "sharepoint",
|
||||||
|
|
@ -61,6 +61,8 @@ SHAREPOINT_NODES = [
|
||||||
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
|
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
|
||||||
"frontendOptions": {"dependsOn": "connectionReference"},
|
"frontendOptions": {"dependsOn": "connectionReference"},
|
||||||
"description": t("Zielordner-Pfad")},
|
"description": t("Zielordner-Pfad")},
|
||||||
|
{"name": "content", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
|
"description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
|
|
@ -106,7 +108,7 @@ SHAREPOINT_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
|
"inputPorts": {0: {"accepts": ["FileList", "Transit", "LoopItem"]}},
|
||||||
"outputPorts": {0: {"schema": "DocumentList"}},
|
"outputPorts": {0: {"schema": "DocumentList"}},
|
||||||
"meta": {"icon": "mdi-download", "color": "#0078D4", "usesAi": False},
|
"meta": {"icon": "mdi-download", "color": "#0078D4", "usesAi": False},
|
||||||
"_method": "sharepoint",
|
"_method": "sharepoint",
|
||||||
|
|
|
||||||
|
|
@ -34,7 +34,7 @@ TRIGGER_NODES = [
|
||||||
"inputs": 0,
|
"inputs": 0,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {},
|
"inputPorts": {},
|
||||||
"outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}},
|
"outputPorts": {0: {"schema": {"kind": "fromGraph", "parameter": "formFields"}}},
|
||||||
"executor": "trigger",
|
"executor": "trigger",
|
||||||
"meta": {"icon": "mdi-form-select", "color": "#9C27B0", "usesAi": False},
|
"meta": {"icon": "mdi-form-select", "color": "#9C27B0", "usesAi": False},
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -46,8 +46,11 @@ TRUSTEE_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
|
"inputPorts": {0: {"accepts": ["DocumentList", "Transit", "AiResult", "LoopItem", "ActionResult"]}},
|
||||||
"outputPorts": {0: {"schema": "DocumentList"}},
|
# Runtime returns ActionResult.isSuccess(documents=[...]) — see
|
||||||
|
# actions/extractFromFiles.py. Declaring DocumentList here was adapter
|
||||||
|
# drift and broke the DataPicker for downstream nodes.
|
||||||
|
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||||
"meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50", "usesAi": True},
|
"meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50", "usesAi": True},
|
||||||
"_method": "trustee",
|
"_method": "trustee",
|
||||||
"_action": "extractFromFiles",
|
"_action": "extractFromFiles",
|
||||||
|
|
@ -58,14 +61,17 @@ TRUSTEE_NODES = [
|
||||||
"label": t("Dokumente verarbeiten"),
|
"label": t("Dokumente verarbeiten"),
|
||||||
"description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."),
|
"description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
|
# Type matches what producers actually emit: ActionResult.documents
|
||||||
"description": t("Automatisch via Wire-Verbindung befüllt")},
|
# is `List[ActionDocument]` (see datamodelChat.ActionResult). The
|
||||||
|
# DataPicker uses this string to filter compatible upstream paths.
|
||||||
|
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
|
||||||
|
"description": t("Dokumentenliste eines Upstream-Producers (z.B. trustee.extractFromFiles → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
|
||||||
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
|
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
"description": t("Trustee Feature-Instanz-ID")},
|
"description": t("Trustee Feature-Instanz-ID")},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
|
"inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}},
|
||||||
"outputPorts": {0: {"schema": "ActionResult"}},
|
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||||
"meta": {"icon": "mdi-file-document-check", "color": "#4CAF50", "usesAi": False},
|
"meta": {"icon": "mdi-file-document-check", "color": "#4CAF50", "usesAi": False},
|
||||||
"_method": "trustee",
|
"_method": "trustee",
|
||||||
|
|
@ -77,14 +83,17 @@ TRUSTEE_NODES = [
|
||||||
"label": t("In Buchhaltung synchronisieren"),
|
"label": t("In Buchhaltung synchronisieren"),
|
||||||
"description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
|
"description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
|
# Type matches what producers actually emit: ActionResult.documents
|
||||||
"description": t("Automatisch via Wire-Verbindung befüllt")},
|
# is `List[ActionDocument]` (see datamodelChat.ActionResult). The
|
||||||
|
# DataPicker uses this string to filter compatible upstream paths.
|
||||||
|
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
|
||||||
|
"description": t("Verarbeitete Dokumentenliste eines Upstream-Producers (z.B. trustee.processDocuments → documents); via expliziten DataRef im Graph zu binden — Pick-not-Push, kein Auto-Wire")},
|
||||||
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
|
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
|
||||||
"description": t("Trustee Feature-Instanz-ID")},
|
"description": t("Trustee Feature-Instanz-ID")},
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["Transit"]}},
|
"inputPorts": {0: {"accepts": ["ActionResult", "DocumentList", "Transit"]}},
|
||||||
"outputPorts": {0: {"schema": "ActionResult"}},
|
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||||
"meta": {"icon": "mdi-calculator", "color": "#4CAF50", "usesAi": False},
|
"meta": {"icon": "mdi-calculator", "color": "#4CAF50", "usesAi": False},
|
||||||
"_method": "trustee",
|
"_method": "trustee",
|
||||||
|
|
@ -122,7 +131,7 @@ TRUSTEE_NODES = [
|
||||||
],
|
],
|
||||||
"inputs": 1,
|
"inputs": 1,
|
||||||
"outputs": 1,
|
"outputs": 1,
|
||||||
"inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult"]}},
|
"inputPorts": {0: {"accepts": ["Transit", "AiResult", "ConsolidateResult", "UdmDocument"]}},
|
||||||
"outputPorts": {0: {"schema": "ActionResult"}},
|
"outputPorts": {0: {"schema": "ActionResult"}},
|
||||||
"meta": {"icon": "mdi-database-search", "color": "#4CAF50", "usesAi": False},
|
"meta": {"icon": "mdi-database-search", "color": "#4CAF50", "usesAi": False},
|
||||||
"_method": "trustee",
|
"_method": "trustee",
|
||||||
|
|
|
||||||
|
|
@ -6,9 +6,10 @@ Nodes are defined first; IO/method actions are used at execution time.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from typing import Dict, List, Any
|
from typing import Dict, List, Any, Optional
|
||||||
|
|
||||||
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
from modules.features.graphicalEditor.nodeAdapter import _bindsActionFromLegacy
|
||||||
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
|
||||||
from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
|
from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
|
||||||
|
|
||||||
|
|
@ -41,12 +42,21 @@ def _pickFromLangMap(d: Any, lang: str) -> Any:
|
||||||
|
|
||||||
|
|
||||||
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
|
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
|
||||||
"""Apply request language via resolveText (t() keys + multilingual dicts)."""
|
"""Apply request language via resolveText (t() keys + multilingual dicts).
|
||||||
|
|
||||||
|
Also exposes Schicht-3 metadata (`bindsAction`) derived from the legacy
|
||||||
|
`_method`/`_action` pair, so frontend consumers can resolve back to the
|
||||||
|
Schicht-2 Action signature without parsing internal underscore-prefixed
|
||||||
|
fields.
|
||||||
|
"""
|
||||||
lang = normalizePrimaryLanguageTag(language, "en")
|
lang = normalizePrimaryLanguageTag(language, "en")
|
||||||
|
bindsAction = _bindsActionFromLegacy(node)
|
||||||
out = dict(node)
|
out = dict(node)
|
||||||
for key in list(out.keys()):
|
for key in list(out.keys()):
|
||||||
if key.startswith("_"):
|
if key.startswith("_"):
|
||||||
del out[key]
|
del out[key]
|
||||||
|
if bindsAction:
|
||||||
|
out["bindsAction"] = bindsAction
|
||||||
lbl = node.get("label")
|
lbl = node.get("label")
|
||||||
if lbl is not None:
|
if lbl is not None:
|
||||||
out["label"] = resolveText(lbl, lang) or node.get("id", "")
|
out["label"] = resolveText(lbl, lang) or node.get("id", "")
|
||||||
|
|
@ -124,3 +134,46 @@ def getNodeTypeToMethodAction() -> Dict[str, tuple]:
|
||||||
if method and action:
|
if method and action:
|
||||||
mapping[node["id"]] = (method, action)
|
mapping[node["id"]] = (method, action)
|
||||||
return mapping
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
|
def validateAdaptersAgainstMethods(methodInstances: Optional[Dict[str, Any]] = None) -> Optional[str]:
|
||||||
|
"""Run the Schicht-3 Adapter validator (5 drift rules) against the live methods.
|
||||||
|
|
||||||
|
Intended to be called once at startup after methodDiscovery has populated
|
||||||
|
the methods registry. Returns a human-readable report (None when healthy)
|
||||||
|
so the caller decides whether to log, raise, or surface to operators.
|
||||||
|
|
||||||
|
Pass `methodInstances` directly for testability; defaults to importing
|
||||||
|
the live registry from `methodDiscovery.methods`.
|
||||||
|
"""
|
||||||
|
from modules.features.graphicalEditor.adapterValidator import (
|
||||||
|
_buildActionsRegistryFromMethods,
|
||||||
|
_formatAdapterReport,
|
||||||
|
_validateAllAdapters,
|
||||||
|
)
|
||||||
|
|
||||||
|
if methodInstances is None:
|
||||||
|
try:
|
||||||
|
from modules.workflows.processing.shared.methodDiscovery import methods
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Adapter validator skipped: cannot import methodDiscovery (%s)", exc)
|
||||||
|
return None
|
||||||
|
|
||||||
|
methodInstances = {}
|
||||||
|
for fullName, info in (methods or {}).items():
|
||||||
|
shortName = fullName.replace("Method", "").lower() if fullName[:1].isupper() else fullName
|
||||||
|
instance = info.get("instance") if isinstance(info, dict) else None
|
||||||
|
if instance is not None:
|
||||||
|
methodInstances[shortName] = instance
|
||||||
|
|
||||||
|
if not methodInstances:
|
||||||
|
return None
|
||||||
|
|
||||||
|
actionsRegistry = _buildActionsRegistryFromMethods(methodInstances)
|
||||||
|
report = _validateAllAdapters(list(STATIC_NODE_TYPES), actionsRegistry)
|
||||||
|
formatted = _formatAdapterReport(report)
|
||||||
|
if not report.isHealthy:
|
||||||
|
logger.warning("[adapterValidator] %s", formatted)
|
||||||
|
elif report.warnings:
|
||||||
|
logger.info("[adapterValidator] %s", formatted)
|
||||||
|
return formatted
|
||||||
|
|
|
||||||
|
|
@ -4,13 +4,14 @@
|
||||||
Typed Port System for the Graphical Editor.
|
Typed Port System for the Graphical Editor.
|
||||||
|
|
||||||
Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES,
|
Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES,
|
||||||
output normalizers, input extractors, and Transit helpers.
|
output normalizers, and Transit helpers.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
import uuid
|
import uuid
|
||||||
from typing import Any, Callable, Dict, List, Optional
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
|
@ -25,9 +26,14 @@ logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
class PortField(BaseModel):
|
class PortField(BaseModel):
|
||||||
name: str
|
name: str
|
||||||
type: str # str, int, bool, List[str], List[Document], Dict[str,Any]
|
type: str # str, int, bool, List[str], List[Document], Dict[str,Any], ConnectionRef, …
|
||||||
description: str = ""
|
description: str = ""
|
||||||
required: bool = True
|
required: bool = True
|
||||||
|
enumValues: Optional[List[str]] = None
|
||||||
|
# Marks this field as the discriminator for a Ref-Schema (e.g. ConnectionRef.authority,
|
||||||
|
# FeatureInstanceRef.featureCode). Pickers/validators use it to filter compatible
|
||||||
|
# producers by sub-type. Type must be "str" when discriminator is True.
|
||||||
|
discriminator: bool = False
|
||||||
|
|
||||||
|
|
||||||
class PortSchema(BaseModel):
|
class PortSchema(BaseModel):
|
||||||
|
|
@ -57,13 +63,113 @@ class OutputPortDef(BaseModel):
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Refs (handles to external resources, pickable by user)
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"ConnectionRef": PortSchema(name="ConnectionRef", fields=[
|
||||||
|
PortField(name="id", type="str", description="UserConnection.id (UUID)"),
|
||||||
|
PortField(name="authority", type="str", discriminator=True,
|
||||||
|
description="Auth-Provider-Code: msft | clickup | google | …"),
|
||||||
|
PortField(name="label", type="str", required=False, description="Anzeigename"),
|
||||||
|
]),
|
||||||
|
"FeatureInstanceRef": PortSchema(name="FeatureInstanceRef", fields=[
|
||||||
|
PortField(name="id", type="str", description="FeatureInstance.id (UUID)"),
|
||||||
|
PortField(name="featureCode", type="str", discriminator=True,
|
||||||
|
description="Feature-Modul-Code: trustee | redmine | clickup | sharepoint | …"),
|
||||||
|
PortField(name="label", type="str", required=False, description="Anzeigename"),
|
||||||
|
PortField(name="mandateId", type="str", required=False, description="Zugehöriger Mandant"),
|
||||||
|
]),
|
||||||
|
"ClickUpListRef": PortSchema(name="ClickUpListRef", fields=[
|
||||||
|
PortField(name="listId", type="str", description="ClickUp-Listen-ID"),
|
||||||
|
PortField(name="name", type="str", required=False, description="Listenname"),
|
||||||
|
PortField(name="spaceId", type="str", required=False, description="Space-ID"),
|
||||||
|
PortField(name="folderId", type="str", required=False, description="Ordner-ID"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="ClickUp-Verbindung"),
|
||||||
|
]),
|
||||||
|
"PromptTemplateRef": PortSchema(name="PromptTemplateRef", fields=[
|
||||||
|
PortField(name="id", type="str", description="Prompt-Template-ID"),
|
||||||
|
PortField(name="name", type="str", required=False, description="Anzeigename"),
|
||||||
|
PortField(name="version", type="str", required=False, description="Version / Tag"),
|
||||||
|
]),
|
||||||
|
"SharePointFolderRef": PortSchema(name="SharePointFolderRef", fields=[
|
||||||
|
PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"),
|
||||||
|
PortField(name="driveId", type="str", required=False, description="Drive ID"),
|
||||||
|
PortField(name="folderPath", type="str", required=False, description="Ordnerpfad"),
|
||||||
|
PortField(name="label", type="str", required=False, description="Kurzlabel für Picker"),
|
||||||
|
]),
|
||||||
|
"SharePointFileRef": PortSchema(name="SharePointFileRef", fields=[
|
||||||
|
PortField(name="siteUrl", type="str", required=False, description="SharePoint Site"),
|
||||||
|
PortField(name="driveId", type="str", required=False, description="Drive ID"),
|
||||||
|
PortField(name="filePath", type="str", required=False, description="Dateipfad"),
|
||||||
|
PortField(name="fileName", type="str", required=False, description="Dateiname"),
|
||||||
|
PortField(name="label", type="str", required=False, description="Kurzlabel"),
|
||||||
|
]),
|
||||||
|
"Document": PortSchema(name="Document", fields=[
|
||||||
|
PortField(name="id", type="str", required=False, description="Dokument-/Datei-ID"),
|
||||||
|
PortField(name="name", type="str", required=False, description="Anzeigename"),
|
||||||
|
PortField(name="mimeType", type="str", required=False, description="MIME-Typ"),
|
||||||
|
PortField(name="sizeBytes", type="int", required=False, description="Grösse"),
|
||||||
|
PortField(name="downloadUrl", type="str", required=False, description="Download-URL"),
|
||||||
|
PortField(name="filePath", type="str", required=False, description="Logischer Pfad"),
|
||||||
|
]),
|
||||||
|
"FileItem": PortSchema(name="FileItem", fields=[
|
||||||
|
PortField(name="id", type="str", required=False, description="Datei-ID"),
|
||||||
|
PortField(name="name", type="str", required=False, description="Name"),
|
||||||
|
PortField(name="path", type="str", required=False, description="Pfad"),
|
||||||
|
PortField(name="mimeType", type="str", required=False, description="MIME"),
|
||||||
|
PortField(name="sizeBytes", type="int", required=False, description="Grösse"),
|
||||||
|
]),
|
||||||
|
"EmailItem": PortSchema(name="EmailItem", fields=[
|
||||||
|
PortField(name="id", type="str", required=False, description="Message-ID"),
|
||||||
|
PortField(name="subject", type="str", required=False, description="Betreff"),
|
||||||
|
PortField(name="fromAddress", type="str", required=False, description="Absender"),
|
||||||
|
PortField(name="toAddresses", type="List[str]", required=False, description="Empfänger"),
|
||||||
|
PortField(name="receivedAt", type="str", required=False, description="Empfangen am"),
|
||||||
|
PortField(name="hasAttachments", type="bool", required=False, description="Hat Anhänge"),
|
||||||
|
PortField(name="bodyPreview", type="str", required=False, description="Vorschau"),
|
||||||
|
]),
|
||||||
|
"TaskItem": PortSchema(name="TaskItem", fields=[
|
||||||
|
PortField(name="id", type="str", required=False, description="Task-ID"),
|
||||||
|
PortField(name="title", type="str", required=False, description="Titel"),
|
||||||
|
PortField(name="status", type="str", required=False, description="Status"),
|
||||||
|
PortField(name="assignee", type="str", required=False, description="Assignee"),
|
||||||
|
PortField(name="dueDate", type="str", required=False, description="Fälligkeit"),
|
||||||
|
PortField(name="listId", type="str", required=False, description="ClickUp-Liste"),
|
||||||
|
]),
|
||||||
|
"QueryResult": PortSchema(name="QueryResult", fields=[
|
||||||
|
PortField(name="rows", type="List[Any]", description="Ergebniszeilen"),
|
||||||
|
PortField(name="columns", type="List[str]", required=False, description="Spaltennamen"),
|
||||||
|
PortField(name="count", type="int", required=False, description="Zeilenanzahl"),
|
||||||
|
]),
|
||||||
|
"UdmPage": PortSchema(name="UdmPage", fields=[
|
||||||
|
PortField(name="pageNumber", type="int", required=False, description="Seitennummer"),
|
||||||
|
PortField(name="blocks", type="List[Any]", required=False, description="ContentBlocks"),
|
||||||
|
]),
|
||||||
|
"UdmBlock": PortSchema(name="UdmBlock", fields=[
|
||||||
|
PortField(name="kind", type="str", required=False, description="Block-Typ"),
|
||||||
|
PortField(name="text", type="str", required=False, description="Textinhalt"),
|
||||||
|
PortField(name="children", type="List[Any]", required=False, description="Unterblöcke"),
|
||||||
|
]),
|
||||||
"DocumentList": PortSchema(name="DocumentList", fields=[
|
"DocumentList": PortSchema(name="DocumentList", fields=[
|
||||||
PortField(name="documents", type="List[Document]",
|
PortField(name="documents", type="List[Document]",
|
||||||
description="Dokumentenliste"),
|
description="Dokumentenliste"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Verbindung, mit der die Liste erzeugt wurde"),
|
||||||
|
PortField(name="source", type="SharePointFolderRef", required=False,
|
||||||
|
description="Herkunftsordner / Quelle"),
|
||||||
|
PortField(name="count", type="int", required=False,
|
||||||
|
description="Anzahl Dokumente"),
|
||||||
]),
|
]),
|
||||||
"FileList": PortSchema(name="FileList", fields=[
|
"FileList": PortSchema(name="FileList", fields=[
|
||||||
PortField(name="files", type="List[File]",
|
PortField(name="files", type="List[FileItem]",
|
||||||
description="Dateiliste"),
|
description="Dateiliste"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Verbindung"),
|
||||||
|
PortField(name="source", type="SharePointFolderRef", required=False,
|
||||||
|
description="Listen-Kontext"),
|
||||||
|
PortField(name="count", type="int", required=False,
|
||||||
|
description="Anzahl Dateien"),
|
||||||
]),
|
]),
|
||||||
"EmailDraft": PortSchema(name="EmailDraft", fields=[
|
"EmailDraft": PortSchema(name="EmailDraft", fields=[
|
||||||
PortField(name="subject", type="str",
|
PortField(name="subject", type="str",
|
||||||
|
|
@ -76,14 +182,26 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
||||||
description="CC"),
|
description="CC"),
|
||||||
PortField(name="attachments", type="List[Document]", required=False,
|
PortField(name="attachments", type="List[Document]", required=False,
|
||||||
description="Anhänge"),
|
description="Anhänge"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Outlook-/Graph-Verbindung"),
|
||||||
]),
|
]),
|
||||||
"EmailList": PortSchema(name="EmailList", fields=[
|
"EmailList": PortSchema(name="EmailList", fields=[
|
||||||
PortField(name="emails", type="List[Email]",
|
PortField(name="emails", type="List[EmailItem]",
|
||||||
description="E-Mails"),
|
description="E-Mails"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Verbindung"),
|
||||||
|
PortField(name="count", type="int", required=False,
|
||||||
|
description="Anzahl"),
|
||||||
]),
|
]),
|
||||||
"TaskList": PortSchema(name="TaskList", fields=[
|
"TaskList": PortSchema(name="TaskList", fields=[
|
||||||
PortField(name="tasks", type="List[Task]",
|
PortField(name="tasks", type="List[TaskItem]",
|
||||||
description="Aufgaben"),
|
description="Aufgaben"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Verbindung"),
|
||||||
|
PortField(name="listId", type="str", required=False,
|
||||||
|
description="ClickUp-Listen-ID"),
|
||||||
|
PortField(name="count", type="int", required=False,
|
||||||
|
description="Anzahl"),
|
||||||
]),
|
]),
|
||||||
"TaskResult": PortSchema(name="TaskResult", fields=[
|
"TaskResult": PortSchema(name="TaskResult", fields=[
|
||||||
PortField(name="success", type="bool",
|
PortField(name="success", type="bool",
|
||||||
|
|
@ -143,11 +261,29 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
||||||
PortField(name="merged", type="Dict",
|
PortField(name="merged", type="Dict",
|
||||||
description="Zusammengeführte Daten"),
|
description="Zusammengeführte Daten"),
|
||||||
]),
|
]),
|
||||||
|
"ActionDocument": PortSchema(name="ActionDocument", fields=[
|
||||||
|
PortField(name="documentName", type="str",
|
||||||
|
description="Dokumentname"),
|
||||||
|
PortField(name="documentData", type="Any",
|
||||||
|
description="Inhalt / Rohdaten (z.B. JSON-String, Bytes)"),
|
||||||
|
PortField(name="mimeType", type="str",
|
||||||
|
description="MIME-Typ"),
|
||||||
|
PortField(name="fileId", type="str", required=False,
|
||||||
|
description="Persistierte FileItem.id (vom Engine ergänzt)"),
|
||||||
|
PortField(name="fileName", type="str", required=False,
|
||||||
|
description="Persistierter Dateiname (vom Engine ergänzt)"),
|
||||||
|
]),
|
||||||
"ActionResult": PortSchema(name="ActionResult", fields=[
|
"ActionResult": PortSchema(name="ActionResult", fields=[
|
||||||
PortField(name="success", type="bool",
|
PortField(name="success", type="bool",
|
||||||
description="Erfolg"),
|
description="Erfolg"),
|
||||||
PortField(name="error", type="str", required=False,
|
PortField(name="error", type="str", required=False,
|
||||||
description="Fehler"),
|
description="Fehler"),
|
||||||
|
# `documents` is populated for every action that returns ActionResult
|
||||||
|
# (see datamodelChat.ActionResult.documents and actionNodeExecutor.out).
|
||||||
|
# Without it in the catalog the DataPicker cannot offer downstream
|
||||||
|
# bindings like `processDocuments → documents → *` for syncToAccounting.
|
||||||
|
PortField(name="documents", type="List[ActionDocument]", required=False,
|
||||||
|
description="Erzeugte Dokumente (immer befüllt für Trustee/AI/Email/...)"),
|
||||||
PortField(name="data", type="Dict", required=False,
|
PortField(name="data", type="Dict", required=False,
|
||||||
description="Ergebnisdaten"),
|
description="Ergebnisdaten"),
|
||||||
]),
|
]),
|
||||||
|
|
@ -156,7 +292,11 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
||||||
PortField(name="id", type="str", description="Dokument-ID"),
|
PortField(name="id", type="str", description="Dokument-ID"),
|
||||||
PortField(name="sourceType", type="str", description="Quellformat (pdf, docx, …)"),
|
PortField(name="sourceType", type="str", description="Quellformat (pdf, docx, …)"),
|
||||||
PortField(name="sourcePath", type="str", description="Quellpfad"),
|
PortField(name="sourcePath", type="str", description="Quellpfad"),
|
||||||
PortField(name="children", type="List[Any]", description="StructuralNodes"),
|
PortField(name="children", type="List[Any]", description="StructuralNodes / Seiten"),
|
||||||
|
PortField(name="connection", type="ConnectionRef", required=False,
|
||||||
|
description="Optionale Verbindungsreferenz"),
|
||||||
|
PortField(name="source", type="SharePointFileRef", required=False,
|
||||||
|
description="Optionale Datei-Herkunft"),
|
||||||
]),
|
]),
|
||||||
"UdmNodeList": PortSchema(name="UdmNodeList", fields=[
|
"UdmNodeList": PortSchema(name="UdmNodeList", fields=[
|
||||||
PortField(name="nodes", type="List[Any]", description="UDM StructuralNodes oder ContentBlocks"),
|
PortField(name="nodes", type="List[Any]", description="UDM StructuralNodes oder ContentBlocks"),
|
||||||
|
|
@ -167,9 +307,287 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
|
||||||
PortField(name="mode", type="str", description="Konsolidierungsmodus"),
|
PortField(name="mode", type="str", description="Konsolidierungsmodus"),
|
||||||
PortField(name="count", type="int", description="Anzahl verarbeiteter Elemente"),
|
PortField(name="count", type="int", description="Anzahl verarbeiteter Elemente"),
|
||||||
]),
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Shared sub-types (used inside Result schemas)
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"ProcessError": PortSchema(name="ProcessError", fields=[
|
||||||
|
PortField(name="documentId", type="str", required=False,
|
||||||
|
description="Betroffenes Dokument (falls zuordbar)"),
|
||||||
|
PortField(name="stage", type="str",
|
||||||
|
description="Pipeline-Stufe: extract | parse | sync | validate | …"),
|
||||||
|
PortField(name="message", type="str", description="Fehlermeldung"),
|
||||||
|
PortField(name="code", type="str", required=False, description="Fehler-Code"),
|
||||||
|
]),
|
||||||
|
"JournalLine": PortSchema(name="JournalLine", fields=[
|
||||||
|
PortField(name="id", type="str", required=False, description="Buchungszeilen-ID"),
|
||||||
|
PortField(name="bookingDate", type="str", description="Buchungsdatum (ISO)"),
|
||||||
|
PortField(name="account", type="str", description="Konto"),
|
||||||
|
PortField(name="contraAccount", type="str", required=False, description="Gegenkonto"),
|
||||||
|
PortField(name="amount", type="float", description="Betrag"),
|
||||||
|
PortField(name="currency", type="str", required=False, description="Währung"),
|
||||||
|
PortField(name="text", type="str", required=False, description="Buchungstext"),
|
||||||
|
PortField(name="reference", type="str", required=False, description="Beleg-Referenz"),
|
||||||
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Trustee Action Results
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"TrusteeRefreshResult": PortSchema(name="TrusteeRefreshResult", fields=[
|
||||||
|
PortField(name="syncCounts", type="Dict[str,int]",
|
||||||
|
description="Tabellen → Anzahl synchronisierter Datensätze"),
|
||||||
|
PortField(name="oldestBookingDate", type="str", required=False,
|
||||||
|
description="Ältestes Buchungsdatum (ISO)"),
|
||||||
|
PortField(name="newestBookingDate", type="str", required=False,
|
||||||
|
description="Neuestes Buchungsdatum (ISO)"),
|
||||||
|
PortField(name="durationMs", type="int", required=False,
|
||||||
|
description="Dauer in Millisekunden"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Trustee-Instanz"),
|
||||||
|
PortField(name="errors", type="List[ProcessError]", required=False,
|
||||||
|
description="Fehler-Liste"),
|
||||||
|
]),
|
||||||
|
"TrusteeProcessResult": PortSchema(name="TrusteeProcessResult", fields=[
|
||||||
|
PortField(name="documents", type="List[Document]",
|
||||||
|
description="Verarbeitete Dokumente mit angereicherten Daten"),
|
||||||
|
PortField(name="processedCount", type="int", required=False,
|
||||||
|
description="Anzahl erfolgreich verarbeiteter Dokumente"),
|
||||||
|
PortField(name="failedCount", type="int", required=False,
|
||||||
|
description="Anzahl fehlgeschlagener Dokumente"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Trustee-Instanz"),
|
||||||
|
PortField(name="errors", type="List[ProcessError]", required=False,
|
||||||
|
description="Fehler-Liste"),
|
||||||
|
]),
|
||||||
|
"TrusteeSyncResult": PortSchema(name="TrusteeSyncResult", fields=[
|
||||||
|
PortField(name="syncedCount", type="int",
|
||||||
|
description="Erfolgreich in das Buchhaltungssystem übertragene Datensätze"),
|
||||||
|
PortField(name="failedCount", type="int", required=False,
|
||||||
|
description="Fehlgeschlagene Übertragungen"),
|
||||||
|
PortField(name="journalLines", type="List[JournalLine]", required=False,
|
||||||
|
description="Erzeugte Buchungszeilen"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Ziel-Trustee-Instanz"),
|
||||||
|
PortField(name="errors", type="List[ProcessError]", required=False,
|
||||||
|
description="Fehler-Liste"),
|
||||||
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Redmine Action Results
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"RedmineTicket": PortSchema(name="RedmineTicket", fields=[
|
||||||
|
PortField(name="id", type="str", description="Ticket-ID"),
|
||||||
|
PortField(name="subject", type="str", description="Betreff"),
|
||||||
|
PortField(name="description", type="str", required=False, description="Beschreibung"),
|
||||||
|
PortField(name="status", type="str", description="Status-Name"),
|
||||||
|
PortField(name="tracker", type="str", required=False,
|
||||||
|
description="Tracker (Bug, Feature, Task, …)"),
|
||||||
|
PortField(name="priority", type="str", required=False, description="Priorität"),
|
||||||
|
PortField(name="assignee", type="str", required=False, description="Zugewiesen an"),
|
||||||
|
PortField(name="author", type="str", required=False, description="Autor"),
|
||||||
|
PortField(name="project", type="str", required=False, description="Projekt"),
|
||||||
|
PortField(name="createdOn", type="str", required=False, description="Erstellt (ISO)"),
|
||||||
|
PortField(name="updatedOn", type="str", required=False, description="Aktualisiert (ISO)"),
|
||||||
|
PortField(name="dueDate", type="str", required=False, description="Fälligkeitsdatum"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Redmine-Instanz"),
|
||||||
|
]),
|
||||||
|
"RedmineTicketList": PortSchema(name="RedmineTicketList", fields=[
|
||||||
|
PortField(name="tickets", type="List[RedmineTicket]", description="Ticket-Liste"),
|
||||||
|
PortField(name="count", type="int", required=False, description="Anzahl Tickets"),
|
||||||
|
PortField(name="filters", type="Dict[str,Any]", required=False,
|
||||||
|
description="Angewendete Filter"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Redmine-Instanz"),
|
||||||
|
]),
|
||||||
|
"RedmineStats": PortSchema(name="RedmineStats", fields=[
|
||||||
|
PortField(name="kpis", type="Dict[str,Any]",
|
||||||
|
description="Key Performance Indicators"),
|
||||||
|
PortField(name="throughput", type="Dict[str,Any]", required=False,
|
||||||
|
description="Durchsatz pro Zeitraum"),
|
||||||
|
PortField(name="statusDistribution", type="Dict[str,int]", required=False,
|
||||||
|
description="Tickets pro Status"),
|
||||||
|
PortField(name="backlog", type="Dict[str,Any]", required=False,
|
||||||
|
description="Backlog-Statistik"),
|
||||||
|
PortField(name="featureInstance", type="FeatureInstanceRef", required=False,
|
||||||
|
description="Redmine-Instanz"),
|
||||||
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# ClickUp / SharePoint / Email helper results
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"TaskAttachmentRef": PortSchema(name="TaskAttachmentRef", fields=[
|
||||||
|
PortField(name="taskId", type="str", description="Aufgaben-ID"),
|
||||||
|
PortField(name="attachmentId", type="str", required=False, description="Attachment-ID"),
|
||||||
|
PortField(name="fileName", type="str", required=False, description="Dateiname"),
|
||||||
|
PortField(name="url", type="str", required=False, description="Download-URL"),
|
||||||
|
]),
|
||||||
|
"AttachmentSpec": PortSchema(name="AttachmentSpec", fields=[
|
||||||
|
PortField(name="source", type="str",
|
||||||
|
description="Quellart: path | document | url",
|
||||||
|
enumValues=["path", "document", "url"]),
|
||||||
|
PortField(name="ref", type="str",
|
||||||
|
description="Referenzwert (Pfad / Document.id / URL)"),
|
||||||
|
PortField(name="fileName", type="str", required=False,
|
||||||
|
description="Override-Dateiname"),
|
||||||
|
PortField(name="mimeType", type="str", required=False, description="MIME-Override"),
|
||||||
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Expressions (replace string-typed condition / cron params)
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"CronExpression": PortSchema(name="CronExpression", fields=[
|
||||||
|
PortField(name="expression", type="str",
|
||||||
|
description="Cron-Ausdruck (5 oder 6 Felder)"),
|
||||||
|
PortField(name="timezone", type="str", required=False,
|
||||||
|
description="IANA Timezone (z.B. Europe/Zurich)"),
|
||||||
|
]),
|
||||||
|
"ConditionExpression": PortSchema(name="ConditionExpression", fields=[
|
||||||
|
PortField(name="expression", type="str", description="Boolescher Ausdruck"),
|
||||||
|
PortField(name="syntax", type="str", required=False,
|
||||||
|
description="jmespath | jsonlogic | python | template",
|
||||||
|
enumValues=["jmespath", "jsonlogic", "python", "template"]),
|
||||||
|
]),
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
# Semantic primitives (give meaning to scalar str values)
|
||||||
|
# -----------------------------------------------------------------
|
||||||
|
"DateTime": PortSchema(name="DateTime", fields=[
|
||||||
|
PortField(name="iso", type="str", description="ISO-8601 Datum/Zeit"),
|
||||||
|
PortField(name="timezone", type="str", required=False,
|
||||||
|
description="IANA Timezone"),
|
||||||
|
]),
|
||||||
|
"Url": PortSchema(name="Url", fields=[
|
||||||
|
PortField(name="url", type="str", description="Vollständige URL"),
|
||||||
|
PortField(name="label", type="str", required=False, description="Anzeigename"),
|
||||||
|
]),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Catalog validator
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Primitives accepted as PortField.type in addition to catalog schema names.
|
||||||
|
PRIMITIVE_TYPES: frozenset = frozenset({
|
||||||
|
"str", "int", "bool", "float", "Any", "Dict", "List",
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
def _stripContainer(typeStr: str) -> List[str]:
|
||||||
|
"""
|
||||||
|
Extract referenced type names from a PortField.type string.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
"str" -> ["str"]
|
||||||
|
"List[Document]" -> ["Document"]
|
||||||
|
"Dict[str,Any]" -> ["str", "Any"]
|
||||||
|
"ConnectionRef" -> ["ConnectionRef"]
|
||||||
|
"List[ProcessError]" -> ["ProcessError"]
|
||||||
|
"""
|
||||||
|
s = (typeStr or "").strip()
|
||||||
|
if not s:
|
||||||
|
return []
|
||||||
|
if "[" in s and s.endswith("]"):
|
||||||
|
# outer container ignored, inner parts split by comma
|
||||||
|
inner = s[s.index("[") + 1 : -1]
|
||||||
|
parts = [p.strip() for p in inner.split(",") if p.strip()]
|
||||||
|
return parts or [s]
|
||||||
|
return [s]
|
||||||
|
|
||||||
|
|
||||||
|
def _isKnownType(typeName: str) -> bool:
|
||||||
|
return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG
|
||||||
|
|
||||||
|
|
||||||
|
def _validateCatalog() -> List[str]:
|
||||||
|
"""
|
||||||
|
Validate PORT_TYPE_CATALOG integrity.
|
||||||
|
|
||||||
|
Returns a list of error messages. Empty list means catalog is healthy.
|
||||||
|
|
||||||
|
Checks:
|
||||||
|
1. Every PortField.type references either a primitive or a known schema.
|
||||||
|
2. Discriminator fields exist, are typed "str", and at most one per schema.
|
||||||
|
3. No cyclic references via required schema-typed fields
|
||||||
|
(optional fields may form cycles intentionally, e.g. provenance).
|
||||||
|
4. Schema name in catalog key matches PortSchema.name.
|
||||||
|
"""
|
||||||
|
errors: List[str] = []
|
||||||
|
|
||||||
|
# Check 4: key consistency
|
||||||
|
for key, schema in PORT_TYPE_CATALOG.items():
|
||||||
|
if schema.name != key:
|
||||||
|
errors.append(f"Catalog key '{key}' does not match schema.name '{schema.name}'")
|
||||||
|
|
||||||
|
# Check 1 + 2: type refs and discriminators
|
||||||
|
for schemaName, schema in PORT_TYPE_CATALOG.items():
|
||||||
|
discriminatorCount = 0
|
||||||
|
for field in schema.fields:
|
||||||
|
for refName in _stripContainer(field.type):
|
||||||
|
if not _isKnownType(refName):
|
||||||
|
errors.append(
|
||||||
|
f"{schemaName}.{field.name}: unknown type '{refName}' "
|
||||||
|
f"(not a primitive and not in catalog)"
|
||||||
|
)
|
||||||
|
if field.discriminator:
|
||||||
|
discriminatorCount += 1
|
||||||
|
if field.type != "str":
|
||||||
|
errors.append(
|
||||||
|
f"{schemaName}.{field.name}: discriminator must be 'str', got '{field.type}'"
|
||||||
|
)
|
||||||
|
if discriminatorCount > 1:
|
||||||
|
errors.append(
|
||||||
|
f"{schemaName}: has {discriminatorCount} discriminator fields, max 1 allowed"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check 3: cycles via required schema-typed fields
|
||||||
|
def _requiredSchemaRefs(name: str) -> List[str]:
|
||||||
|
sch = PORT_TYPE_CATALOG.get(name)
|
||||||
|
if not sch:
|
||||||
|
return []
|
||||||
|
out: List[str] = []
|
||||||
|
for field in sch.fields:
|
||||||
|
if not field.required:
|
||||||
|
continue
|
||||||
|
for ref in _stripContainer(field.type):
|
||||||
|
if ref in PORT_TYPE_CATALOG:
|
||||||
|
out.append(ref)
|
||||||
|
return out
|
||||||
|
|
||||||
|
def _hasCycle(start: str) -> Optional[List[str]]:
|
||||||
|
stack: List[str] = [start]
|
||||||
|
path: List[str] = []
|
||||||
|
visiting: set = set()
|
||||||
|
|
||||||
|
def _dfs(name: str) -> Optional[List[str]]:
|
||||||
|
if name in visiting:
|
||||||
|
return path + [name]
|
||||||
|
visiting.add(name)
|
||||||
|
path.append(name)
|
||||||
|
for ref in _requiredSchemaRefs(name):
|
||||||
|
if ref == start and len(path) > 0:
|
||||||
|
return path + [ref]
|
||||||
|
cycle = _dfs(ref)
|
||||||
|
if cycle:
|
||||||
|
return cycle
|
||||||
|
path.pop()
|
||||||
|
visiting.discard(name)
|
||||||
|
return None
|
||||||
|
|
||||||
|
return _dfs(start)
|
||||||
|
|
||||||
|
for schemaName in PORT_TYPE_CATALOG.keys():
|
||||||
|
cycle = _hasCycle(schemaName)
|
||||||
|
if cycle and cycle[0] == schemaName:
|
||||||
|
errors.append(
|
||||||
|
f"{schemaName}: cyclic required-ref chain: {' -> '.join(cycle)}"
|
||||||
|
)
|
||||||
|
break # one cycle is enough — avoid spamming
|
||||||
|
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# SYSTEM_VARIABLES
|
# SYSTEM_VARIABLES
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|
@ -259,6 +677,8 @@ def _defaultForType(typeStr: str) -> Any:
|
||||||
return 0
|
return 0
|
||||||
if typeStr == "str":
|
if typeStr == "str":
|
||||||
return ""
|
return ""
|
||||||
|
if typeStr in PORT_TYPE_CATALOG:
|
||||||
|
return {}
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -272,210 +692,6 @@ def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]:
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Input extractors (one per input port type)
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract EmailDraft fields from upstream output."""
|
|
||||||
result = {}
|
|
||||||
if upstream.get("responseData") and isinstance(upstream["responseData"], dict):
|
|
||||||
rd = upstream["responseData"]
|
|
||||||
for key in ("subject", "body", "to", "cc"):
|
|
||||||
if key in rd:
|
|
||||||
result[key] = rd[key]
|
|
||||||
if not result:
|
|
||||||
for key in ("subject", "body", "to", "cc"):
|
|
||||||
if key in upstream:
|
|
||||||
result[key] = upstream[key]
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract documents from upstream output."""
|
|
||||||
docs = upstream.get("documents") or upstream.get("documentList") or []
|
|
||||||
if not docs and isinstance(upstream.get("data"), dict):
|
|
||||||
docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or []
|
|
||||||
# input.upload format
|
|
||||||
if not docs:
|
|
||||||
files = upstream.get("files") or []
|
|
||||||
fileObj = upstream.get("file")
|
|
||||||
fileIds = upstream.get("fileIds") or []
|
|
||||||
if fileObj:
|
|
||||||
docs = [fileObj]
|
|
||||||
elif files:
|
|
||||||
docs = files
|
|
||||||
elif fileIds:
|
|
||||||
docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds]
|
|
||||||
normalized = docs if isinstance(docs, list) else [docs]
|
|
||||||
return {"documents": normalized, "documentList": normalized} if docs else {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract text from upstream output."""
|
|
||||||
text = upstream.get("text") or upstream.get("response") or upstream.get("context") or ""
|
|
||||||
if not text and upstream.get("payload"):
|
|
||||||
import json
|
|
||||||
payload = upstream["payload"]
|
|
||||||
text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload)
|
|
||||||
return {"text": str(text)} if text else {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract email list from upstream output."""
|
|
||||||
emails = upstream.get("emails") or []
|
|
||||||
if not emails:
|
|
||||||
docs = upstream.get("documents") or upstream.get("documentList") or []
|
|
||||||
if docs:
|
|
||||||
import json
|
|
||||||
for doc in docs:
|
|
||||||
raw = doc.get("documentData") if isinstance(doc, dict) else None
|
|
||||||
if raw:
|
|
||||||
try:
|
|
||||||
data = json.loads(raw) if isinstance(raw, str) else raw
|
|
||||||
if isinstance(data, dict):
|
|
||||||
found = (data.get("emails", {}).get("emails", [])
|
|
||||||
or data.get("searchResults", {}).get("results", []))
|
|
||||||
if found:
|
|
||||||
emails = found
|
|
||||||
break
|
|
||||||
except (json.JSONDecodeError, TypeError):
|
|
||||||
pass
|
|
||||||
return {"emails": emails} if emails else {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract task list from upstream output."""
|
|
||||||
tasks = upstream.get("tasks") or []
|
|
||||||
if not tasks:
|
|
||||||
docs = upstream.get("documents") or upstream.get("documentList") or []
|
|
||||||
if docs:
|
|
||||||
import json
|
|
||||||
for doc in docs:
|
|
||||||
raw = doc.get("documentData") if isinstance(doc, dict) else None
|
|
||||||
if raw:
|
|
||||||
try:
|
|
||||||
data = json.loads(raw) if isinstance(raw, str) else raw
|
|
||||||
if isinstance(data, dict) and "tasks" in data:
|
|
||||||
tasks = data["tasks"]
|
|
||||||
break
|
|
||||||
except (json.JSONDecodeError, TypeError):
|
|
||||||
pass
|
|
||||||
return {"tasks": tasks} if tasks else {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract file list from upstream output."""
|
|
||||||
files = upstream.get("files") or []
|
|
||||||
return {"files": files} if files else {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract form payload from upstream output."""
|
|
||||||
payload = upstream.get("payload")
|
|
||||||
if payload and isinstance(payload, dict):
|
|
||||||
return {"payload": payload}
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract AI result fields from upstream output."""
|
|
||||||
result = {}
|
|
||||||
for key in ("prompt", "response", "responseData", "context", "documents"):
|
|
||||||
if key in upstream:
|
|
||||||
result[key] = upstream[key]
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract bool result from upstream output."""
|
|
||||||
result = upstream.get("result")
|
|
||||||
if isinstance(result, bool):
|
|
||||||
return {"result": result, "reason": upstream.get("reason", "")}
|
|
||||||
approved = upstream.get("approved")
|
|
||||||
if isinstance(approved, bool):
|
|
||||||
return {"result": approved, "reason": upstream.get("reason", "")}
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract task result from upstream output."""
|
|
||||||
result = {}
|
|
||||||
if "taskId" in upstream:
|
|
||||||
result["taskId"] = upstream["taskId"]
|
|
||||||
if "task" in upstream:
|
|
||||||
result["task"] = upstream["task"]
|
|
||||||
elif "clickupTask" in upstream:
|
|
||||||
result["task"] = upstream["clickupTask"]
|
|
||||||
if "success" in upstream:
|
|
||||||
result["success"] = upstream["success"]
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract aggregate result from upstream output."""
|
|
||||||
items = upstream.get("items") or []
|
|
||||||
return {"items": items, "count": len(items)}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract merge result from upstream output."""
|
|
||||||
return {
|
|
||||||
"inputs": upstream.get("inputs", {}),
|
|
||||||
"first": upstream.get("first"),
|
|
||||||
"merged": upstream.get("merged", {}),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractUdmDocument(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract UdmDocument fields from upstream output."""
|
|
||||||
if upstream.get("children") is not None and upstream.get("sourceType"):
|
|
||||||
return upstream
|
|
||||||
udm = upstream.get("udm")
|
|
||||||
if isinstance(udm, dict) and udm.get("children") is not None:
|
|
||||||
return udm
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractUdmNodeList(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract UdmNodeList fields from upstream output."""
|
|
||||||
nodes = upstream.get("nodes")
|
|
||||||
if isinstance(nodes, list):
|
|
||||||
return {"nodes": nodes, "count": len(nodes)}
|
|
||||||
children = upstream.get("children")
|
|
||||||
if isinstance(children, list):
|
|
||||||
return {"nodes": children, "count": len(children)}
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _extractConsolidateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
|
||||||
"""Extract ConsolidateResult fields from upstream output."""
|
|
||||||
result = {}
|
|
||||||
for key in ("result", "mode", "count"):
|
|
||||||
if key in upstream:
|
|
||||||
result[key] = upstream[key]
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
INPUT_EXTRACTORS: Dict[str, Callable] = {
|
|
||||||
"EmailDraft": _extractEmailDraft,
|
|
||||||
"DocumentList": _extractDocuments,
|
|
||||||
"TextResult": _extractText,
|
|
||||||
"EmailList": _extractEmailList,
|
|
||||||
"TaskList": _extractTaskList,
|
|
||||||
"FileList": _extractFileList,
|
|
||||||
"FormPayload": _extractFormPayload,
|
|
||||||
"AiResult": _extractAiResult,
|
|
||||||
"BoolResult": _extractBoolResult,
|
|
||||||
"TaskResult": _extractTaskResult,
|
|
||||||
"AggregateResult": _extractAggregateResult,
|
|
||||||
"MergeResult": _extractMergeResult,
|
|
||||||
"UdmDocument": _extractUdmDocument,
|
|
||||||
"UdmNodeList": _extractUdmNodeList,
|
|
||||||
"ConsolidateResult": _extractConsolidateResult,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# Transit helpers
|
# Transit helpers
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|
@ -522,27 +738,83 @@ def _resolveTransitChain(
|
||||||
# Schema derivation for dynamic outputs
|
# Schema derivation for dynamic outputs
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
|
def _derive_form_payload_schema_from_param(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]:
|
||||||
"""Derive output schema from form field definitions."""
|
"""Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …)."""
|
||||||
fields_param = (node.get("parameters") or {}).get("fields")
|
fields_param = (node.get("parameters") or {}).get(param_key)
|
||||||
if not fields_param or not isinstance(fields_param, list):
|
if not fields_param or not isinstance(fields_param, list):
|
||||||
return None
|
return None
|
||||||
portFields = []
|
portFields: List[PortField] = []
|
||||||
for f in fields_param:
|
|
||||||
if isinstance(f, dict) and f.get("name"):
|
def _append_field(fname: str, ftype: Any, lab: Any, required: bool) -> None:
|
||||||
_lab = f.get("label")
|
_desc = resolveText(lab) if lab is not None else fname
|
||||||
_desc = resolveText(_lab) if _lab is not None else f["name"]
|
if not str(_desc).strip():
|
||||||
if not _desc.strip():
|
_desc = fname
|
||||||
_desc = f["name"]
|
|
||||||
portFields.append(PortField(
|
portFields.append(PortField(
|
||||||
name=f["name"],
|
name=fname,
|
||||||
type=f.get("type", "str"),
|
type=str(ftype) if ftype is not None else "str",
|
||||||
description=_desc,
|
description=_desc,
|
||||||
required=f.get("required", False),
|
required=required,
|
||||||
))
|
))
|
||||||
|
|
||||||
|
for f in fields_param:
|
||||||
|
if not isinstance(f, dict) or not f.get("name"):
|
||||||
|
continue
|
||||||
|
fname = str(f["name"])
|
||||||
|
if str(f.get("type", "")).lower() == "group" and isinstance(f.get("fields"), list):
|
||||||
|
for sub in f["fields"]:
|
||||||
|
if isinstance(sub, dict) and sub.get("name"):
|
||||||
|
_append_field(
|
||||||
|
f"{fname}.{sub['name']}",
|
||||||
|
sub.get("type", "str"),
|
||||||
|
sub.get("label"),
|
||||||
|
bool(sub.get("required", False)),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
_append_field(fname, f.get("type", "str"), f.get("label"), bool(f.get("required", False)))
|
||||||
return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None
|
return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None
|
||||||
|
|
||||||
|
|
||||||
|
def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
|
||||||
|
"""Derive output schema from form field definitions (``parameters.fields``)."""
|
||||||
|
return _derive_form_payload_schema_from_param(node, "fields")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_graph_defined_output_schema(
|
||||||
|
node: Dict[str, Any],
|
||||||
|
output_port: Dict[str, Any],
|
||||||
|
) -> Optional[PortSchema]:
|
||||||
|
"""
|
||||||
|
Resolve a node's output port to a concrete PortSchema.
|
||||||
|
|
||||||
|
Supports:
|
||||||
|
- Static catalog name: ``schema: "ActionResult"``
|
||||||
|
- Graph-defined: ``schema: {"kind": "fromGraph", "parameter": "fields"}``
|
||||||
|
- Legacy: ``dynamic`` + ``deriveFrom`` on the port dict.
|
||||||
|
"""
|
||||||
|
if not isinstance(output_port, dict):
|
||||||
|
return None
|
||||||
|
schema_spec = output_port.get("schema")
|
||||||
|
if isinstance(schema_spec, dict) and schema_spec.get("kind") == "fromGraph":
|
||||||
|
param_key = str(schema_spec.get("parameter") or "fields")
|
||||||
|
return _derive_form_payload_schema_from_param(node, param_key)
|
||||||
|
if output_port.get("dynamic") and output_port.get("deriveFrom"):
|
||||||
|
return _derive_form_payload_schema_from_param(node, str(output_port.get("deriveFrom")))
|
||||||
|
if isinstance(schema_spec, str) and schema_spec:
|
||||||
|
return PORT_TYPE_CATALOG.get(schema_spec)
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_output_schema_name(node: Dict[str, Any], output_port: Dict[str, Any]) -> str:
|
||||||
|
"""Return a schema name for port compatibility / path listing."""
|
||||||
|
derived = parse_graph_defined_output_schema(node, output_port)
|
||||||
|
if derived:
|
||||||
|
return derived.name
|
||||||
|
spec = output_port.get("schema") if isinstance(output_port, dict) else None
|
||||||
|
if isinstance(spec, str) and spec:
|
||||||
|
return spec
|
||||||
|
return "Any"
|
||||||
|
|
||||||
|
|
||||||
def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
|
def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
|
||||||
"""Derive output schema from transform mappings."""
|
"""Derive output schema from transform mappings."""
|
||||||
mappings = (node.get("parameters") or {}).get("mappings")
|
mappings = (node.get("parameters") or {}).get("mappings")
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,7 @@ from modules.workflows.automation2.runEnvelope import (
|
||||||
normalize_run_envelope,
|
normalize_run_envelope,
|
||||||
)
|
)
|
||||||
from modules.features.graphicalEditor.entryPoints import find_invocation
|
from modules.features.graphicalEditor.entryPoints import find_invocation
|
||||||
|
from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
|
||||||
from modules.shared.i18nRegistry import apiRouteContext, resolveText
|
from modules.shared.i18nRegistry import apiRouteContext, resolveText
|
||||||
routeApiMsg = apiRouteContext("routeFeatureGraphicalEditor")
|
routeApiMsg = apiRouteContext("routeFeatureGraphicalEditor")
|
||||||
|
|
||||||
|
|
@ -135,6 +136,48 @@ def get_node_types(
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/upstream-paths")
|
||||||
|
@limiter.limit("60/minute")
|
||||||
|
def post_upstream_paths(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str = Path(..., description="Feature instance ID"),
|
||||||
|
body: Dict[str, Any] = Body(...),
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
) -> dict:
|
||||||
|
"""Return pickable upstream DataRef paths for a node (draft graph in body)."""
|
||||||
|
_validateInstanceAccess(instanceId, context)
|
||||||
|
graph = body.get("graph")
|
||||||
|
node_id = body.get("nodeId")
|
||||||
|
if not isinstance(graph, dict) or not node_id:
|
||||||
|
raise HTTPException(status_code=400, detail=routeApiMsg("graph and nodeId are required"))
|
||||||
|
paths = compute_upstream_paths(graph, str(node_id))
|
||||||
|
return {"paths": paths}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/upstream-paths/{node_id}")
|
||||||
|
@limiter.limit("60/minute")
|
||||||
|
def get_upstream_paths_saved(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str = Path(..., description="Feature instance ID"),
|
||||||
|
node_id: str = Path(..., description="Target node id"),
|
||||||
|
workflowId: str = Query(..., description="Workflow id whose saved graph is used"),
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
) -> dict:
|
||||||
|
"""Return upstream paths using the persisted workflow graph (same payload as POST variant)."""
|
||||||
|
mandate_id = _validateInstanceAccess(instanceId, context)
|
||||||
|
if not workflowId:
|
||||||
|
raise HTTPException(status_code=400, detail=routeApiMsg("workflowId is required"))
|
||||||
|
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
|
||||||
|
|
||||||
|
iface = getGraphicalEditorInterface(context.user, mandate_id, featureInstanceId=instanceId)
|
||||||
|
wf = iface.getWorkflow(workflowId)
|
||||||
|
if not wf:
|
||||||
|
raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
|
||||||
|
graph = wf.get("graph") or {}
|
||||||
|
paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id))
|
||||||
|
return {"paths": paths}
|
||||||
|
|
||||||
|
|
||||||
@router.get("/{instanceId}/options/user.connection")
|
@router.get("/{instanceId}/options/user.connection")
|
||||||
@limiter.limit("60/minute")
|
@limiter.limit("60/minute")
|
||||||
def get_user_connection_options(
|
def get_user_connection_options(
|
||||||
|
|
@ -813,6 +856,7 @@ async def _runEditorAgent(
|
||||||
"\n\nAvailable tools (all valid — use whichever the user's intent calls for):"
|
"\n\nAvailable tools (all valid — use whichever the user's intent calls for):"
|
||||||
"\n Graph-mutating: readWorkflowGraph, listAvailableNodeTypes, "
|
"\n Graph-mutating: readWorkflowGraph, listAvailableNodeTypes, "
|
||||||
"describeNodeType, addNode, removeNode, connectNodes, setNodeParameter, "
|
"describeNodeType, addNode, removeNode, connectNodes, setNodeParameter, "
|
||||||
|
"listUpstreamPaths, bindNodeParameter, "
|
||||||
"autoLayoutWorkflow, validateGraph."
|
"autoLayoutWorkflow, validateGraph."
|
||||||
"\n Workflow lifecycle: createWorkflow (new empty workflow), "
|
"\n Workflow lifecycle: createWorkflow (new empty workflow), "
|
||||||
"updateWorkflowMetadata (rename / change description / tags / activate), "
|
"updateWorkflowMetadata (rename / change description / tags / activate), "
|
||||||
|
|
@ -844,6 +888,8 @@ async def _runEditorAgent(
|
||||||
"description, sane defaults, or — for required user-connection fields — "
|
"description, sane defaults, or — for required user-connection fields — "
|
||||||
"an actual connectionId). Do NOT pass position; the layout step handles it."
|
"an actual connectionId). Do NOT pass position; the layout step handles it."
|
||||||
"\n6. connectNodes — wire the nodes consistent with port schemas from describeNodeType."
|
"\n6. connectNodes — wire the nodes consistent with port schemas from describeNodeType."
|
||||||
|
"\n6b. When a parameter must take data from an upstream node, call listUpstreamPaths(nodeId=target) "
|
||||||
|
"then bindNodeParameter(producerNodeId, path, parameterName) — do not rely on implicit wire fill."
|
||||||
"\n7. autoLayoutWorkflow — call exactly once as the LAST graph-mutating step so the "
|
"\n7. autoLayoutWorkflow — call exactly once as the LAST graph-mutating step so the "
|
||||||
"canvas shows a readable top-down layout instead of overlapping boxes."
|
"canvas shows a readable top-down layout instead of overlapping boxes."
|
||||||
"\n8. validateGraph — sanity check, then answer the user."
|
"\n8. validateGraph — sanity check, then answer the user."
|
||||||
|
|
|
||||||
128
modules/features/graphicalEditor/upstreamPathsService.py
Normal file
128
modules/features/graphicalEditor/upstreamPathsService.py
Normal file
|
|
@ -0,0 +1,128 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""Compute pickable upstream paths for DataPicker / AI workflow tools."""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Any, Dict, List, Set
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, PortSchema, parse_graph_defined_output_schema
|
||||||
|
from modules.workflows.automation2.graphUtils import buildConnectionMap
|
||||||
|
|
||||||
|
_NODE_BY_TYPE = {n["id"]: n for n in STATIC_NODE_TYPES}
|
||||||
|
|
||||||
|
|
||||||
|
def _paths_for_port_schema(schema: PortSchema, producer_node_id: str) -> List[Dict[str, Any]]:
|
||||||
|
out: List[Dict[str, Any]] = []
|
||||||
|
for field in schema.fields:
|
||||||
|
path = [field.name]
|
||||||
|
out.append(
|
||||||
|
{
|
||||||
|
"producerNodeId": producer_node_id,
|
||||||
|
"path": path,
|
||||||
|
"type": field.type,
|
||||||
|
"label": ".".join(str(p) for p in path),
|
||||||
|
"scopeOrigin": "data",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
out.append(
|
||||||
|
{
|
||||||
|
"producerNodeId": producer_node_id,
|
||||||
|
"path": [],
|
||||||
|
"type": schema.name,
|
||||||
|
"label": "(whole output)",
|
||||||
|
"scopeOrigin": "data",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def _paths_for_schema(schema_name: str, producer_node_id: str) -> List[Dict[str, Any]]:
|
||||||
|
if not schema_name or schema_name == "Transit":
|
||||||
|
return []
|
||||||
|
schema = PORT_TYPE_CATALOG.get(schema_name)
|
||||||
|
if not schema:
|
||||||
|
return []
|
||||||
|
return _paths_for_port_schema(schema, producer_node_id)
|
||||||
|
|
||||||
|
|
||||||
|
def compute_upstream_paths(graph: Dict[str, Any], target_node_id: str) -> List[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Return flattened first-level paths for every ancestor node's primary output schema.
|
||||||
|
"""
|
||||||
|
nodes = graph.get("nodes") or []
|
||||||
|
connections = graph.get("connections") or []
|
||||||
|
node_by_id = {n["id"]: n for n in nodes if n.get("id")}
|
||||||
|
if target_node_id not in node_by_id:
|
||||||
|
return []
|
||||||
|
|
||||||
|
conn_map = buildConnectionMap(connections)
|
||||||
|
# predecessors: walk backwards along edges (target -> source)
|
||||||
|
preds: Dict[str, Set[str]] = {}
|
||||||
|
for tgt, pairs in conn_map.items():
|
||||||
|
for src, _, _ in pairs:
|
||||||
|
preds.setdefault(tgt, set()).add(src)
|
||||||
|
|
||||||
|
seen: Set[str] = set()
|
||||||
|
stack = [target_node_id]
|
||||||
|
ancestors: Set[str] = set()
|
||||||
|
while stack:
|
||||||
|
cur = stack.pop()
|
||||||
|
for p in preds.get(cur, ()):
|
||||||
|
if p not in seen:
|
||||||
|
seen.add(p)
|
||||||
|
ancestors.add(p)
|
||||||
|
stack.append(p)
|
||||||
|
|
||||||
|
paths: List[Dict[str, Any]] = []
|
||||||
|
for aid in sorted(ancestors):
|
||||||
|
anode = node_by_id.get(aid)
|
||||||
|
if not anode:
|
||||||
|
continue
|
||||||
|
nt = anode.get("type", "")
|
||||||
|
ndef = _NODE_BY_TYPE.get(nt)
|
||||||
|
if not ndef:
|
||||||
|
continue
|
||||||
|
out0 = (ndef.get("outputPorts") or {}).get(0, {})
|
||||||
|
derived = parse_graph_defined_output_schema(anode, out0 if isinstance(out0, dict) else {})
|
||||||
|
if derived:
|
||||||
|
for entry in _paths_for_port_schema(derived, aid):
|
||||||
|
entry["producerLabel"] = (anode.get("title") or "").strip() or aid
|
||||||
|
paths.append(entry)
|
||||||
|
else:
|
||||||
|
raw_schema = out0.get("schema") if isinstance(out0, dict) else None
|
||||||
|
schema_name = raw_schema if isinstance(raw_schema, str) and raw_schema else "ActionResult"
|
||||||
|
for entry in _paths_for_schema(schema_name, aid):
|
||||||
|
entry["producerLabel"] = (anode.get("title") or "").strip() or aid
|
||||||
|
paths.append(entry)
|
||||||
|
|
||||||
|
# Lexical loop hints (flow.loop): any loop node in ancestors adds synthetic paths
|
||||||
|
for aid in ancestors:
|
||||||
|
anode = node_by_id.get(aid) or {}
|
||||||
|
if anode.get("type") == "flow.loop":
|
||||||
|
paths.extend(
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"producerNodeId": aid,
|
||||||
|
"path": ["currentItem"],
|
||||||
|
"type": "Any",
|
||||||
|
"label": "loop.currentItem",
|
||||||
|
"scopeOrigin": "loop",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"producerNodeId": aid,
|
||||||
|
"path": ["currentIndex"],
|
||||||
|
"type": "int",
|
||||||
|
"label": "loop.currentIndex",
|
||||||
|
"scopeOrigin": "loop",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"producerNodeId": aid,
|
||||||
|
"path": ["count"],
|
||||||
|
"type": "int",
|
||||||
|
"label": "loop.count",
|
||||||
|
"scopeOrigin": "loop",
|
||||||
|
},
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
return paths
|
||||||
|
|
@ -4,7 +4,8 @@
|
||||||
Teamsbot Feature - Data Models.
|
Teamsbot Feature - Data Models.
|
||||||
Pydantic models for Teams Bot sessions, transcripts, bot responses, and configuration.
|
Pydantic models for Teams Bot sessions, transcripts, bot responses, and configuration.
|
||||||
"""
|
"""
|
||||||
from typing import Optional, List, Dict, Any
|
from typing import Optional, List, Dict, Any, Literal
|
||||||
|
from datetime import datetime, timezone
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
import uuid
|
import uuid
|
||||||
|
|
@ -12,6 +13,14 @@ import uuid
|
||||||
from modules.datamodels.datamodelBase import PowerOnModel
|
from modules.datamodels.datamodelBase import PowerOnModel
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Director Prompt Limits
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
DIRECTOR_PROMPT_TEXT_LIMIT = 8000
|
||||||
|
DIRECTOR_PROMPT_FILE_LIMIT = 10
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Enums
|
# Enums
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
@ -267,6 +276,56 @@ class SpeechTeamsResponse(BaseModel):
|
||||||
reasoning: str = Field(default="", description="Reasoning for the decision (for logging/debug)")
|
reasoning: str = Field(default="", description="Reasoning for the decision (for logging/debug)")
|
||||||
detectedIntent: str = Field(default="none", description="Detected intent: addressed, question, proactive, stop, none")
|
detectedIntent: str = Field(default="none", description="Detected intent: addressed, question, proactive, stop, none")
|
||||||
commands: Optional[List[TeamsbotCommand]] = Field(default=None, description="Optional list of commands to execute (e.g. toggle transcript, send chat, change language)")
|
commands: Optional[List[TeamsbotCommand]] = Field(default=None, description="Optional list of commands to execute (e.g. toggle transcript, send chat, change language)")
|
||||||
|
needsAgent: bool = Field(default=False, description="If True, escalate to agentService.runAgent for complex multi-step processing (web research, mail, etc.)")
|
||||||
|
agentReason: Optional[str] = Field(default=None, description="Why escalation to the full agent is required (used as task brief for the agent)")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Director Prompts (private operator instructions sent during a live meeting)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TeamsbotDirectorPromptStatus(str, Enum):
|
||||||
|
"""Lifecycle status of a Director Prompt."""
|
||||||
|
QUEUED = "queued"
|
||||||
|
RUNNING = "running"
|
||||||
|
SUCCEEDED = "succeeded"
|
||||||
|
FAILED = "failed"
|
||||||
|
CONSUMED = "consumed" # one-shot consumed; persistent prompts stay active
|
||||||
|
|
||||||
|
|
||||||
|
class TeamsbotDirectorPromptMode(str, Enum):
|
||||||
|
"""How long a Director Prompt remains effective."""
|
||||||
|
ONE_SHOT = "oneShot"
|
||||||
|
PERSISTENT = "persistent"
|
||||||
|
|
||||||
|
|
||||||
|
class TeamsbotDirectorPrompt(PowerOnModel):
|
||||||
|
"""A private operator instruction injected into the bot during a live meeting.
|
||||||
|
|
||||||
|
Stored in PostgreSQL so it survives reconnects (persistent prompts) and is
|
||||||
|
auditable. Visible only to the session owner via SSE; invisible to other
|
||||||
|
meeting participants.
|
||||||
|
"""
|
||||||
|
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Director prompt ID")
|
||||||
|
sessionId: str = Field(description="Teams Bot session ID (FK)")
|
||||||
|
instanceId: str = Field(description="Feature instance ID (FK)")
|
||||||
|
operatorUserId: str = Field(description="User ID of the operator who issued the prompt")
|
||||||
|
text: str = Field(description="The director instruction text", max_length=DIRECTOR_PROMPT_TEXT_LIMIT)
|
||||||
|
mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent")
|
||||||
|
fileIds: List[str] = Field(default_factory=list, description="UDB-selected file/object IDs to attach as RAG context")
|
||||||
|
status: TeamsbotDirectorPromptStatus = Field(default=TeamsbotDirectorPromptStatus.QUEUED, description="Lifecycle status")
|
||||||
|
statusMessage: Optional[str] = Field(default=None, description="Optional error or status detail")
|
||||||
|
createdAt: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat(), description="ISO timestamp when created")
|
||||||
|
consumedAt: Optional[str] = Field(default=None, description="ISO timestamp when consumed (one-shot) or marked done")
|
||||||
|
agentRunId: Optional[str] = Field(default=None, description="Reference to the agent run that processed this prompt")
|
||||||
|
responseText: Optional[str] = Field(default=None, description="Final agent text delivered to the meeting")
|
||||||
|
|
||||||
|
|
||||||
|
class TeamsbotDirectorPromptCreateRequest(BaseModel):
|
||||||
|
"""Request body for submitting a new Director Prompt."""
|
||||||
|
text: str = Field(description="Director instruction text", min_length=1, max_length=DIRECTOR_PROMPT_TEXT_LIMIT)
|
||||||
|
mode: TeamsbotDirectorPromptMode = Field(default=TeamsbotDirectorPromptMode.ONE_SHOT, description="oneShot or persistent")
|
||||||
|
fileIds: List[str] = Field(default_factory=list, description="UDB file IDs to attach (max 10)")
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
|
||||||
|
|
@ -21,6 +21,9 @@ from .datamodelTeamsbot import (
|
||||||
TeamsbotSystemBot,
|
TeamsbotSystemBot,
|
||||||
TeamsbotUserSettings,
|
TeamsbotUserSettings,
|
||||||
TeamsbotUserAccount,
|
TeamsbotUserAccount,
|
||||||
|
TeamsbotDirectorPrompt,
|
||||||
|
TeamsbotDirectorPromptStatus,
|
||||||
|
TeamsbotDirectorPromptMode,
|
||||||
)
|
)
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
@ -114,11 +117,10 @@ class TeamsbotObjects:
|
||||||
return self.db.recordModify(TeamsbotSession, sessionId, updates)
|
return self.db.recordModify(TeamsbotSession, sessionId, updates)
|
||||||
|
|
||||||
def deleteSession(self, sessionId: str) -> bool:
|
def deleteSession(self, sessionId: str) -> bool:
|
||||||
"""Delete a session and all related transcripts and responses."""
|
"""Delete a session and all related transcripts, responses and director prompts."""
|
||||||
# Delete related records first
|
|
||||||
self._deleteTranscriptsBySession(sessionId)
|
self._deleteTranscriptsBySession(sessionId)
|
||||||
self._deleteResponsesBySession(sessionId)
|
self._deleteResponsesBySession(sessionId)
|
||||||
# Delete session
|
self._deletePromptsBySession(sessionId)
|
||||||
return self.db.recordDelete(TeamsbotSession, sessionId)
|
return self.db.recordDelete(TeamsbotSession, sessionId)
|
||||||
|
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
|
|
@ -272,6 +274,62 @@ class TeamsbotObjects:
|
||||||
"""Delete saved MS credentials."""
|
"""Delete saved MS credentials."""
|
||||||
return self.db.recordDelete(TeamsbotUserAccount, accountId)
|
return self.db.recordDelete(TeamsbotUserAccount, accountId)
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Director Prompts (private operator instructions during a live meeting)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
def createDirectorPrompt(self, promptData: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
"""Create a new director prompt record."""
|
||||||
|
return self.db.recordCreate(TeamsbotDirectorPrompt, promptData)
|
||||||
|
|
||||||
|
def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Get a single director prompt by ID."""
|
||||||
|
records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"id": promptId})
|
||||||
|
return records[0] if records else None
|
||||||
|
|
||||||
|
def getDirectorPrompts(self, sessionId: str, operatorUserId: str | None = None) -> List[Dict[str, Any]]:
|
||||||
|
"""Get all director prompts for a session, optionally filtered by operator."""
|
||||||
|
recordFilter: Dict[str, Any] = {"sessionId": sessionId}
|
||||||
|
if operatorUserId:
|
||||||
|
recordFilter["operatorUserId"] = operatorUserId
|
||||||
|
records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter=recordFilter)
|
||||||
|
records.sort(key=lambda r: r.get("createdAt") or "")
|
||||||
|
return records
|
||||||
|
|
||||||
|
def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]:
|
||||||
|
"""Get persistent prompts that are still active (not consumed/failed) for a session."""
|
||||||
|
records = self.db.getRecordset(
|
||||||
|
TeamsbotDirectorPrompt,
|
||||||
|
recordFilter={
|
||||||
|
"sessionId": sessionId,
|
||||||
|
"mode": TeamsbotDirectorPromptMode.PERSISTENT.value,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
terminal = {
|
||||||
|
TeamsbotDirectorPromptStatus.CONSUMED.value,
|
||||||
|
TeamsbotDirectorPromptStatus.FAILED.value,
|
||||||
|
}
|
||||||
|
active = [r for r in records if r.get("status") not in terminal]
|
||||||
|
active.sort(key=lambda r: r.get("createdAt") or "")
|
||||||
|
return active
|
||||||
|
|
||||||
|
def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Update a director prompt (status, response text, etc.)."""
|
||||||
|
return self.db.recordModify(TeamsbotDirectorPrompt, promptId, updates)
|
||||||
|
|
||||||
|
def deleteDirectorPrompt(self, promptId: str) -> bool:
|
||||||
|
"""Delete a director prompt (e.g. when operator removes a persistent prompt)."""
|
||||||
|
return self.db.recordDelete(TeamsbotDirectorPrompt, promptId)
|
||||||
|
|
||||||
|
def _deletePromptsBySession(self, sessionId: str) -> int:
|
||||||
|
"""Delete all director prompts for a session (called from deleteSession)."""
|
||||||
|
records = self.db.getRecordset(TeamsbotDirectorPrompt, recordFilter={"sessionId": sessionId})
|
||||||
|
count = 0
|
||||||
|
for record in records:
|
||||||
|
self.db.recordDelete(TeamsbotDirectorPrompt, record.get("id"))
|
||||||
|
count += 1
|
||||||
|
return count
|
||||||
|
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
# Stats / Aggregation
|
# Stats / Aggregation
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,11 @@ from .datamodelTeamsbot import (
|
||||||
TeamsbotUserAccount,
|
TeamsbotUserAccount,
|
||||||
TeamsbotResponseChannel,
|
TeamsbotResponseChannel,
|
||||||
TeamsbotResponseMode,
|
TeamsbotResponseMode,
|
||||||
|
TeamsbotDirectorPromptCreateRequest,
|
||||||
|
TeamsbotDirectorPromptMode,
|
||||||
|
TeamsbotDirectorPromptStatus,
|
||||||
|
DIRECTOR_PROMPT_FILE_LIMIT,
|
||||||
|
DIRECTOR_PROMPT_TEXT_LIMIT,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Import service
|
# Import service
|
||||||
|
|
@ -383,6 +388,11 @@ async def streamSession(
|
||||||
# Send initial session state
|
# Send initial session state
|
||||||
yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n"
|
yield f"data: {json.dumps({'type': 'sessionState', 'data': session})}\n\n"
|
||||||
|
|
||||||
|
# Send current bot WebSocket connection state so the operator UI can
|
||||||
|
# render the live indicator without waiting for the next connect/disconnect.
|
||||||
|
from .service import getActiveService as _getActiveService
|
||||||
|
yield f"data: {json.dumps({'type': 'botConnectionState', 'data': {'connected': _getActiveService(sessionId) is not None}})}\n\n"
|
||||||
|
|
||||||
# Stream events
|
# Stream events
|
||||||
eventQueue = _sessionEvents.get(sessionId)
|
eventQueue = _sessionEvents.get(sessionId)
|
||||||
if not eventQueue:
|
if not eventQueue:
|
||||||
|
|
@ -832,6 +842,132 @@ async def submitMfaCode(
|
||||||
raise HTTPException(status_code=404, detail=routeApiMsg("No active MFA challenge for this session"))
|
raise HTTPException(status_code=404, detail=routeApiMsg("No active MFA challenge for this session"))
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Director Prompts (private operator instructions during a live meeting)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/sessions/{sessionId}/directorPrompts")
|
||||||
|
@limiter.limit("30/minute")
|
||||||
|
async def submitDirectorPrompt(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str,
|
||||||
|
sessionId: str,
|
||||||
|
body: TeamsbotDirectorPromptCreateRequest,
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
):
|
||||||
|
"""Submit a private director prompt to the running bot. Triggers the
|
||||||
|
full agent path (web, mail, RAG, etc.) and delivers the answer into the
|
||||||
|
meeting via TTS + chat. Only the session owner can submit prompts."""
|
||||||
|
_validateInstanceAccess(instanceId, context)
|
||||||
|
interface = _getInterface(context, instanceId)
|
||||||
|
|
||||||
|
session = interface.getSession(sessionId)
|
||||||
|
if not session:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
|
||||||
|
_validateSessionOwnership(session, context)
|
||||||
|
|
||||||
|
if session.get("status") not in (
|
||||||
|
TeamsbotSessionStatus.ACTIVE.value,
|
||||||
|
TeamsbotSessionStatus.JOINING.value,
|
||||||
|
):
|
||||||
|
raise HTTPException(status_code=400, detail=routeApiMsg("Session is not active"))
|
||||||
|
|
||||||
|
text = (body.text or "").strip()
|
||||||
|
if not text:
|
||||||
|
raise HTTPException(status_code=400, detail=routeApiMsg("Prompt text is required"))
|
||||||
|
if len(text) > DIRECTOR_PROMPT_TEXT_LIMIT:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=400,
|
||||||
|
detail=routeApiMsg(f"Prompt text exceeds limit of {DIRECTOR_PROMPT_TEXT_LIMIT} characters"),
|
||||||
|
)
|
||||||
|
fileIds = list(body.fileIds or [])
|
||||||
|
if len(fileIds) > DIRECTOR_PROMPT_FILE_LIMIT:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=400,
|
||||||
|
detail=routeApiMsg(f"Too many files ({len(fileIds)}); max {DIRECTOR_PROMPT_FILE_LIMIT}"),
|
||||||
|
)
|
||||||
|
|
||||||
|
from .service import getActiveService
|
||||||
|
service = getActiveService(sessionId)
|
||||||
|
if not service:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=409,
|
||||||
|
detail=routeApiMsg(
|
||||||
|
"Bot is not yet live in the meeting (no WebSocket connection). "
|
||||||
|
"Wait until the bot status indicator turns green and try again."
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
created = await service.submitDirectorPrompt(
|
||||||
|
sessionId=sessionId,
|
||||||
|
operatorUserId=str(context.user.id),
|
||||||
|
text=text,
|
||||||
|
mode=body.mode,
|
||||||
|
fileIds=fileIds,
|
||||||
|
)
|
||||||
|
return {"prompt": created}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/sessions/{sessionId}/directorPrompts")
|
||||||
|
@limiter.limit("30/minute")
|
||||||
|
async def listDirectorPrompts(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str,
|
||||||
|
sessionId: str,
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
):
|
||||||
|
"""List director prompts for a session (only operator's own prompts)."""
|
||||||
|
_validateInstanceAccess(instanceId, context)
|
||||||
|
interface = _getInterface(context, instanceId)
|
||||||
|
|
||||||
|
session = interface.getSession(sessionId)
|
||||||
|
if not session:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
|
||||||
|
_validateSessionOwnership(session, context)
|
||||||
|
|
||||||
|
operatorUserId = None if context.isPlatformAdmin else str(context.user.id)
|
||||||
|
prompts = interface.getDirectorPrompts(sessionId, operatorUserId=operatorUserId)
|
||||||
|
return {"prompts": prompts}
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/{instanceId}/sessions/{sessionId}/directorPrompts/{promptId}")
|
||||||
|
@limiter.limit("30/minute")
|
||||||
|
async def deleteDirectorPrompt(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str,
|
||||||
|
sessionId: str,
|
||||||
|
promptId: str,
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
):
|
||||||
|
"""Remove a (typically persistent) director prompt. Marks it consumed so
|
||||||
|
it no longer influences the bot. The DB record is kept for audit."""
|
||||||
|
_validateInstanceAccess(instanceId, context)
|
||||||
|
interface = _getInterface(context, instanceId)
|
||||||
|
|
||||||
|
session = interface.getSession(sessionId)
|
||||||
|
if not session:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Session '{sessionId}' not found")
|
||||||
|
_validateSessionOwnership(session, context)
|
||||||
|
|
||||||
|
prompt = interface.getDirectorPrompt(promptId)
|
||||||
|
if not prompt or prompt.get("sessionId") != sessionId:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found")
|
||||||
|
if not context.isPlatformAdmin and prompt.get("operatorUserId") != str(context.user.id):
|
||||||
|
raise HTTPException(status_code=404, detail=f"Prompt '{promptId}' not found")
|
||||||
|
|
||||||
|
from .service import getActiveService
|
||||||
|
service = getActiveService(sessionId)
|
||||||
|
if service:
|
||||||
|
await service.removePersistentPrompt(promptId)
|
||||||
|
else:
|
||||||
|
# Bot not connected: mark consumed directly
|
||||||
|
interface.updateDirectorPrompt(promptId, {
|
||||||
|
"status": TeamsbotDirectorPromptStatus.CONSUMED.value,
|
||||||
|
"statusMessage": "Removed by operator (bot offline)",
|
||||||
|
})
|
||||||
|
return {"deleted": True, "promptId": promptId}
|
||||||
|
|
||||||
|
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
# Voice Test Endpoint
|
# Voice Test Endpoint
|
||||||
# =========================================================================
|
# =========================================================================
|
||||||
|
|
@ -845,7 +981,7 @@ async def testVoice(
|
||||||
):
|
):
|
||||||
"""Test TTS voice with AI-generated sample text in the correct language."""
|
"""Test TTS voice with AI-generated sample text in the correct language."""
|
||||||
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
|
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
|
||||||
from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService
|
from .service import _createAiService
|
||||||
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
|
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
|
||||||
|
|
||||||
mandateId = _validateInstanceAccess(instanceId, context)
|
mandateId = _validateInstanceAccess(instanceId, context)
|
||||||
|
|
@ -856,12 +992,7 @@ async def testVoice(
|
||||||
botName = body.get("botName", "AI Assistant")
|
botName = body.get("botName", "AI Assistant")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Generate test text dynamically via AI in the correct language
|
aiService = _createAiService(context.user, mandateId, instanceId)
|
||||||
serviceContext = type('Ctx', (), {
|
|
||||||
'user': context.user, 'mandateId': mandateId,
|
|
||||||
'featureInstanceId': instanceId, 'featureCode': 'teamsbot'
|
|
||||||
})()
|
|
||||||
aiService = AiService(serviceCenter=serviceContext)
|
|
||||||
await aiService.ensureAiObjectsInitialized()
|
await aiService.ensureAiObjectsInitialized()
|
||||||
|
|
||||||
aiRequest = AiCallRequest(
|
aiRequest = AiCallRequest(
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -144,6 +144,8 @@ class AccountingDataSync:
|
||||||
"journalLines": 0,
|
"journalLines": 0,
|
||||||
"contacts": 0,
|
"contacts": 0,
|
||||||
"accountBalances": 0,
|
"accountBalances": 0,
|
||||||
|
"oldestBookingDate": None,
|
||||||
|
"newestBookingDate": None,
|
||||||
"errors": [],
|
"errors": [],
|
||||||
"startedAt": time.time(),
|
"startedAt": time.time(),
|
||||||
}
|
}
|
||||||
|
|
@ -211,12 +213,14 @@ class AccountingDataSync:
|
||||||
)
|
)
|
||||||
_dumpSyncData("journalEntries", rawEntries)
|
_dumpSyncData("journalEntries", rawEntries)
|
||||||
_progress(60, f"Speichere {len(rawEntries)} Buchungssaetze...")
|
_progress(60, f"Speichere {len(rawEntries)} Buchungssaetze...")
|
||||||
entriesCount, linesCount = await asyncio.to_thread(
|
entriesCount, linesCount, oldestDate, newestDate = await asyncio.to_thread(
|
||||||
self._persistJournal, rawEntries, scope, featureInstanceId,
|
self._persistJournal, rawEntries, scope, featureInstanceId,
|
||||||
TrusteeDataJournalEntry, TrusteeDataJournalLine,
|
TrusteeDataJournalEntry, TrusteeDataJournalLine,
|
||||||
)
|
)
|
||||||
summary["journalEntries"] = entriesCount
|
summary["journalEntries"] = entriesCount
|
||||||
summary["journalLines"] = linesCount
|
summary["journalLines"] = linesCount
|
||||||
|
summary["oldestBookingDate"] = oldestDate
|
||||||
|
summary["newestBookingDate"] = newestDate
|
||||||
_progress(65, f"{entriesCount} Saetze + {linesCount} Buchungszeilen gespeichert.")
|
_progress(65, f"{entriesCount} Saetze + {linesCount} Buchungszeilen gespeichert.")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Import journal entries failed: {e}", exc_info=True)
|
logger.error(f"Import journal entries failed: {e}", exc_info=True)
|
||||||
|
|
@ -277,6 +281,11 @@ class AccountingDataSync:
|
||||||
"journalLines": int(summary.get("journalLines", 0)),
|
"journalLines": int(summary.get("journalLines", 0)),
|
||||||
"contacts": int(summary.get("contacts", 0)),
|
"contacts": int(summary.get("contacts", 0)),
|
||||||
"accountBalances": int(summary.get("accountBalances", 0)),
|
"accountBalances": int(summary.get("accountBalances", 0)),
|
||||||
|
# Actual oldest/newest booking date observed in the
|
||||||
|
# imported journal entries. Lets the user verify that the
|
||||||
|
# full requested window was returned by the source system.
|
||||||
|
"oldestBookingDate": summary.get("oldestBookingDate"),
|
||||||
|
"newestBookingDate": summary.get("newestBookingDate"),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
|
|
@ -321,6 +330,9 @@ class AccountingDataSync:
|
||||||
|
|
||||||
We pre-build the line rows in memory keyed by the freshly minted entryId
|
We pre-build the line rows in memory keyed by the freshly minted entryId
|
||||||
so a single ``execute_values`` call can persist all of them.
|
so a single ``execute_values`` call can persist all of them.
|
||||||
|
|
||||||
|
Returns ``(entriesCount, linesCount, oldestBookingDate, newestBookingDate)``
|
||||||
|
where the date strings are ISO ``YYYY-MM-DD`` (or ``None`` if no entries).
|
||||||
"""
|
"""
|
||||||
import uuid as _uuid
|
import uuid as _uuid
|
||||||
t0 = time.time()
|
t0 = time.time()
|
||||||
|
|
@ -329,12 +341,22 @@ class AccountingDataSync:
|
||||||
|
|
||||||
entryRows: List[Dict[str, Any]] = []
|
entryRows: List[Dict[str, Any]] = []
|
||||||
lineRows: List[Dict[str, Any]] = []
|
lineRows: List[Dict[str, Any]] = []
|
||||||
|
oldestDate: Optional[str] = None
|
||||||
|
newestDate: Optional[str] = None
|
||||||
for raw in rawEntries:
|
for raw in rawEntries:
|
||||||
entryId = str(_uuid.uuid4())
|
entryId = str(_uuid.uuid4())
|
||||||
|
bookingDate = raw.get("bookingDate")
|
||||||
|
if bookingDate:
|
||||||
|
normalized = str(bookingDate).split("T")[0][:10]
|
||||||
|
if normalized:
|
||||||
|
if oldestDate is None or normalized < oldestDate:
|
||||||
|
oldestDate = normalized
|
||||||
|
if newestDate is None or normalized > newestDate:
|
||||||
|
newestDate = normalized
|
||||||
entryRows.append({
|
entryRows.append({
|
||||||
"id": entryId,
|
"id": entryId,
|
||||||
"externalId": raw.get("externalId"),
|
"externalId": raw.get("externalId"),
|
||||||
"bookingDate": raw.get("bookingDate"),
|
"bookingDate": bookingDate,
|
||||||
"reference": raw.get("reference"),
|
"reference": raw.get("reference"),
|
||||||
"description": raw.get("description", ""),
|
"description": raw.get("description", ""),
|
||||||
"currency": raw.get("currency", "CHF"),
|
"currency": raw.get("currency", "CHF"),
|
||||||
|
|
@ -363,9 +385,10 @@ class AccountingDataSync:
|
||||||
linesCount = self._bulkCreate(modelLine, lineRows)
|
linesCount = self._bulkCreate(modelLine, lineRows)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Persisted {entriesCount} entries + {linesCount} lines for "
|
f"Persisted {entriesCount} entries + {linesCount} lines for "
|
||||||
f"{featureInstanceId} in {time.time() - t0:.1f}s"
|
f"{featureInstanceId} in {time.time() - t0:.1f}s "
|
||||||
|
f"(window: {oldestDate or '?'} .. {newestDate or '?'})"
|
||||||
)
|
)
|
||||||
return entriesCount, linesCount
|
return entriesCount, linesCount, oldestDate, newestDate
|
||||||
|
|
||||||
def _persistContacts(self, customers: list, vendors: list, scope: Dict[str, Any],
|
def _persistContacts(self, customers: list, vendors: list, scope: Dict[str, Any],
|
||||||
featureInstanceId: str, modelContact: Type) -> int:
|
featureInstanceId: str, modelContact: Type) -> int:
|
||||||
|
|
|
||||||
|
|
@ -437,7 +437,10 @@ class AccountingConnectorRma(BaseAccountingConnector):
|
||||||
"creditAmount": credit,
|
"creditAmount": credit,
|
||||||
"description": desc,
|
"description": desc,
|
||||||
})
|
})
|
||||||
entry["totalAmount"] += max(debit, credit)
|
# Booking total = sum of debits (== sum of credits for a balanced
|
||||||
|
# booking). Summing max(debit, credit) per line would double-count
|
||||||
|
# a balanced 2-line booking (200 instead of 100).
|
||||||
|
entry["totalAmount"] += debit
|
||||||
|
|
||||||
return list(entriesByRef.values())
|
return list(entriesByRef.values())
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|
@ -494,7 +497,9 @@ class AccountingConnectorRma(BaseAccountingConnector):
|
||||||
"creditAmount": credit,
|
"creditAmount": credit,
|
||||||
"description": t.get("memo", ""),
|
"description": t.get("memo", ""),
|
||||||
})
|
})
|
||||||
totalAmt += max(debit, credit)
|
# Sum debits only -- equals sum of credits for a balanced
|
||||||
|
# booking. max(debit, credit) per line would double-count.
|
||||||
|
totalAmt += debit
|
||||||
|
|
||||||
entries.append({
|
entries.append({
|
||||||
"externalId": str(batch.get("id", ref)),
|
"externalId": str(batch.get("id", ref)),
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
"""Trustee models: TrusteeOrganisation, TrusteeRole, TrusteeAccess, TrusteeContract, TrusteeDocument, TrusteePosition."""
|
"""Trustee models: TrusteeOrganisation, TrusteeRole, TrusteeAccess, TrusteeContract, TrusteeDocument, TrusteePosition."""
|
||||||
|
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Optional, Dict
|
from typing import Optional, Dict, Any
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
from modules.datamodels.datamodelBase import PowerOnModel
|
from modules.datamodels.datamodelBase import PowerOnModel
|
||||||
|
|
@ -832,7 +832,7 @@ class TrusteeAccountingConfig(PowerOnModel):
|
||||||
lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"})
|
lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"})
|
||||||
lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"})
|
lastSyncDateFrom: Optional[str] = Field(default=None, description="dateFrom (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster von"})
|
||||||
lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"})
|
lastSyncDateTo: Optional[str] = Field(default=None, description="dateTo (ISO date) of the last data import window", json_schema_extra={"label": "Letztes Import-Fenster bis"})
|
||||||
lastSyncCounts: Optional[Dict[str, int]] = Field(default=None, description="Per-entity counts of the last import (accounts, journalEntries, journalLines, contacts, accountBalances)", json_schema_extra={"label": "Letzte Import-Zaehler"})
|
lastSyncCounts: Optional[Dict[str, Any]] = Field(default=None, description="Last import summary: per-entity counts (accounts, journalEntries, journalLines, contacts, accountBalances) plus oldestBookingDate / newestBookingDate (ISO YYYY-MM-DD) for completeness verification", json_schema_extra={"label": "Letzte Import-Zaehler"})
|
||||||
cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"})
|
cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"})
|
||||||
chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"})
|
chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"})
|
||||||
mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}})
|
mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat", "fk_target": {"db": "poweron_app", "table": "Mandate"}})
|
||||||
|
|
|
||||||
|
|
@ -1864,13 +1864,87 @@ def clear_ai_data_cache(
|
||||||
instanceId: str = Path(..., description="Feature Instance ID"),
|
instanceId: str = Path(..., description="Feature Instance ID"),
|
||||||
context: RequestContext = Depends(getRequestContext),
|
context: RequestContext = Depends(getRequestContext),
|
||||||
) -> Dict[str, Any]:
|
) -> Dict[str, Any]:
|
||||||
"""Clear the AI feature-data query cache for this instance so the next AI query reads fresh DB data."""
|
"""Clear ONLY the AI feature-data query result cache (in-memory, ~5 min TTL).
|
||||||
|
|
||||||
|
Important: this does NOT touch the synchronised ``TrusteeData*`` tables.
|
||||||
|
The synced rows (chart of accounts, journal entries/lines, contacts, balances)
|
||||||
|
stay exactly as imported. To wipe those rows, use POST .../wipe-imported-data.
|
||||||
|
"""
|
||||||
_validateInstanceAccess(instanceId, context)
|
_validateInstanceAccess(instanceId, context)
|
||||||
from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
|
from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
|
||||||
removed = clearFeatureQueryCache(instanceId)
|
removed = clearFeatureQueryCache(instanceId)
|
||||||
return {"cleared": removed, "featureInstanceId": instanceId}
|
return {"cleared": removed, "featureInstanceId": instanceId}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/accounting/wipe-imported-data")
|
||||||
|
@limiter.limit("3/minute")
|
||||||
|
def wipe_imported_accounting_data(
|
||||||
|
request: Request,
|
||||||
|
instanceId: str = Path(..., description="Feature Instance ID"),
|
||||||
|
context: RequestContext = Depends(getRequestContext),
|
||||||
|
) -> Dict[str, Any]:
|
||||||
|
"""Delete every ``TrusteeData*`` row imported for this feature instance.
|
||||||
|
|
||||||
|
Use when the source system was changed, test data needs to be cleared, or
|
||||||
|
the user suspects stale rows from earlier connector versions. Also resets
|
||||||
|
the ``lastSync*`` markers on the active config so the UI no longer reports
|
||||||
|
a stale "letzter Import" status. The connector configuration / credentials
|
||||||
|
remain untouched -- only synchronised payload data is removed.
|
||||||
|
"""
|
||||||
|
mandateId = _validateInstanceAccess(instanceId, context)
|
||||||
|
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
|
||||||
|
from .datamodelFeatureTrustee import (
|
||||||
|
TrusteeDataAccount, TrusteeDataJournalEntry, TrusteeDataJournalLine,
|
||||||
|
TrusteeDataContact, TrusteeDataAccountBalance, TrusteeAccountingConfig,
|
||||||
|
)
|
||||||
|
from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
|
||||||
|
|
||||||
|
removed: Dict[str, int] = {}
|
||||||
|
for tableName, model in [
|
||||||
|
("accounts", TrusteeDataAccount),
|
||||||
|
("journalEntries", TrusteeDataJournalEntry),
|
||||||
|
("journalLines", TrusteeDataJournalLine),
|
||||||
|
("contacts", TrusteeDataContact),
|
||||||
|
("accountBalances", TrusteeDataAccountBalance),
|
||||||
|
]:
|
||||||
|
try:
|
||||||
|
removed[tableName] = int(interface.db.recordDeleteWhere(model, {"featureInstanceId": instanceId}) or 0)
|
||||||
|
except Exception as ex:
|
||||||
|
logger.warning("wipeImportedData: failed for %s: %s", tableName, ex)
|
||||||
|
removed[tableName] = 0
|
||||||
|
|
||||||
|
cfgRecords = interface.db.getRecordset(
|
||||||
|
TrusteeAccountingConfig,
|
||||||
|
recordFilter={"featureInstanceId": instanceId, "isActive": True},
|
||||||
|
)
|
||||||
|
if cfgRecords:
|
||||||
|
cfgId = cfgRecords[0].get("id")
|
||||||
|
if cfgId:
|
||||||
|
try:
|
||||||
|
interface.db.recordModify(TrusteeAccountingConfig, cfgId, {
|
||||||
|
"lastSyncAt": None,
|
||||||
|
"lastSyncStatus": None,
|
||||||
|
"lastSyncErrorMessage": None,
|
||||||
|
"lastSyncDateFrom": None,
|
||||||
|
"lastSyncDateTo": None,
|
||||||
|
"lastSyncCounts": None,
|
||||||
|
})
|
||||||
|
except Exception as ex:
|
||||||
|
logger.warning("wipeImportedData: failed to reset lastSync* on cfg %s: %s", cfgId, ex)
|
||||||
|
|
||||||
|
cacheCleared = clearFeatureQueryCache(instanceId)
|
||||||
|
logger.info(
|
||||||
|
"wipeImportedData instance=%s removed=%s cacheCleared=%s",
|
||||||
|
instanceId, removed, cacheCleared,
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"removed": removed,
|
||||||
|
"totalRemoved": sum(removed.values()),
|
||||||
|
"cacheCleared": cacheCleared,
|
||||||
|
"featureInstanceId": instanceId,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
# ===== Data Export =====
|
# ===== Data Export =====
|
||||||
|
|
||||||
@router.get("/{instanceId}/accounting/export-data")
|
@router.get("/{instanceId}/accounting/export-data")
|
||||||
|
|
|
||||||
|
|
@ -210,7 +210,18 @@ def _buildSystemTemplates():
|
||||||
"nodes": [
|
"nodes": [
|
||||||
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Täglicher Check", "parameters": {}},
|
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Täglicher Check", "parameters": {}},
|
||||||
{"id": "n2", "type": "email.checkEmail", "x": 300, "y": 200, "title": "Mailbox prüfen", "parameters": {}},
|
{"id": "n2", "type": "email.checkEmail", "x": 300, "y": 200, "title": "Mailbox prüfen", "parameters": {}},
|
||||||
{"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro E-Mail", "parameters": {}},
|
{
|
||||||
|
"id": "n3",
|
||||||
|
"type": "flow.loop",
|
||||||
|
"x": 550,
|
||||||
|
"y": 200,
|
||||||
|
"title": "Pro E-Mail",
|
||||||
|
"parameters": {
|
||||||
|
"items": {"type": "ref", "nodeId": "n2", "path": ["emails"]},
|
||||||
|
"level": "auto",
|
||||||
|
"concurrency": 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
{"id": "n4", "type": "ai.prompt", "x": 800, "y": 200, "title": "Analyse: Antwort nötig?", "parameters": {}},
|
{"id": "n4", "type": "ai.prompt", "x": 800, "y": 200, "title": "Analyse: Antwort nötig?", "parameters": {}},
|
||||||
{"id": "n5", "type": "flow.ifElse", "x": 1050, "y": 200, "title": "Antwort nötig?", "parameters": {}},
|
{"id": "n5", "type": "flow.ifElse", "x": 1050, "y": 200, "title": "Antwort nötig?", "parameters": {}},
|
||||||
{"id": "n6", "type": "ai.prompt", "x": 1300, "y": 100, "title": "Kontext abrufen & Antwort formulieren", "parameters": {}},
|
{"id": "n6", "type": "ai.prompt", "x": 1300, "y": 100, "title": "Kontext abrufen & Antwort formulieren", "parameters": {}},
|
||||||
|
|
@ -239,7 +250,18 @@ def _buildSystemTemplates():
|
||||||
"nodes": [
|
"nodes": [
|
||||||
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Geplanter Import", "parameters": {}},
|
{"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Geplanter Import", "parameters": {}},
|
||||||
{"id": "n2", "type": "sharepoint.listFiles", "x": 300, "y": 200, "title": "SharePoint Ordner lesen", "parameters": {}},
|
{"id": "n2", "type": "sharepoint.listFiles", "x": 300, "y": 200, "title": "SharePoint Ordner lesen", "parameters": {}},
|
||||||
{"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro Dokument", "parameters": {}},
|
{
|
||||||
|
"id": "n3",
|
||||||
|
"type": "flow.loop",
|
||||||
|
"x": 550,
|
||||||
|
"y": 200,
|
||||||
|
"title": "Pro Dokument",
|
||||||
|
"parameters": {
|
||||||
|
"items": {"type": "ref", "nodeId": "n2", "path": ["files"]},
|
||||||
|
"level": "auto",
|
||||||
|
"concurrency": 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
{"id": "n4", "type": "sharepoint.readFile", "x": 800, "y": 200, "title": "PDF-Inhalt lesen", "parameters": {}},
|
{"id": "n4", "type": "sharepoint.readFile", "x": 800, "y": 200, "title": "PDF-Inhalt lesen", "parameters": {}},
|
||||||
{"id": "n5", "type": "ai.prompt", "x": 1050, "y": 200, "title": "Typ klassifizieren (Rechnung, Beleg, Bankauszug, Vertrag, etc.)", "parameters": {}},
|
{"id": "n5", "type": "ai.prompt", "x": 1050, "y": 200, "title": "Typ klassifizieren (Rechnung, Beleg, Bankauszug, Vertrag, etc.)", "parameters": {}},
|
||||||
{"id": "n6", "type": "trustee.extractFromFiles", "x": 1300, "y": 200, "title": "Dokument extrahieren", "parameters": {}},
|
{"id": "n6", "type": "trustee.extractFromFiles", "x": 1300, "y": 200, "title": "Dokument extrahieren", "parameters": {}},
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
"""ActionToolAdapter: wraps existing workflow actions (dynamicMode=True) as agent tools."""
|
"""ActionToolAdapter: wraps existing workflow actions (dynamicMode=True) as agent tools."""
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from typing import Dict, Any, List, Optional
|
from typing import Dict, Any, List
|
||||||
|
|
||||||
from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
|
from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
|
||||||
ToolDefinition, ToolResult
|
ToolDefinition, ToolResult
|
||||||
|
|
@ -70,22 +70,28 @@ def _buildToolDefinition(compoundName: str, actionDef, actionInfo: Dict[str, Any
|
||||||
|
|
||||||
|
|
||||||
def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
|
def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
"""Convert workflow action parameter schema to JSON Schema for tool definitions."""
|
"""Convert workflow action parameter schema to JSON Schema for tool definitions.
|
||||||
properties = {}
|
|
||||||
required = []
|
Schicht-3 Adapter (typed): looks up each parameter's `type` against the
|
||||||
|
PORT_TYPE_CATALOG and produces a strict JSON Schema fragment.
|
||||||
|
Falls back to a generic string schema only when the type is fully unknown
|
||||||
|
(which should never happen after Phase 2's signature validator).
|
||||||
|
"""
|
||||||
|
properties: Dict[str, Any] = {}
|
||||||
|
required: List[str] = []
|
||||||
|
|
||||||
for paramName, paramInfo in actionParams.items():
|
for paramName, paramInfo in actionParams.items():
|
||||||
paramType = paramInfo.get("type", "str") if isinstance(paramInfo, dict) else "str"
|
if not isinstance(paramInfo, dict):
|
||||||
paramDesc = paramInfo.get("description", "") if isinstance(paramInfo, dict) else ""
|
properties[paramName] = {"type": "string", "description": ""}
|
||||||
paramRequired = paramInfo.get("required", False) if isinstance(paramInfo, dict) else False
|
continue
|
||||||
|
|
||||||
jsonType = _pythonTypeToJsonType(paramType)
|
paramType = paramInfo.get("type", "str")
|
||||||
prop: Dict[str, Any] = {
|
paramDesc = paramInfo.get("description", "") or ""
|
||||||
"type": jsonType,
|
paramRequired = bool(paramInfo.get("required", False))
|
||||||
"description": paramDesc,
|
|
||||||
}
|
prop = _catalogTypeToJsonSchema(paramType)
|
||||||
if jsonType == "array":
|
if paramDesc:
|
||||||
prop["items"] = _pythonTypeToArrayItems(paramType) or {"type": "string"}
|
prop["description"] = paramDesc
|
||||||
properties[paramName] = prop
|
properties[paramName] = prop
|
||||||
|
|
||||||
if paramRequired:
|
if paramRequired:
|
||||||
|
|
@ -94,41 +100,90 @@ def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": properties,
|
"properties": properties,
|
||||||
"required": required
|
"required": required,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
_TYPE_MAPPING = {
|
# Primitive Python type strings → JSON Schema scalar types.
|
||||||
|
_PRIMITIVE_JSON_TYPE: Dict[str, str] = {
|
||||||
"str": "string",
|
"str": "string",
|
||||||
"int": "integer",
|
"int": "integer",
|
||||||
"float": "number",
|
"float": "number",
|
||||||
"bool": "boolean",
|
"bool": "boolean",
|
||||||
"list": "array",
|
|
||||||
"dict": "object",
|
|
||||||
"List[str]": "array",
|
|
||||||
"List[int]": "array",
|
|
||||||
"List[dict]": "array",
|
|
||||||
"List[float]": "array",
|
|
||||||
"Dict[str, Any]": "object",
|
|
||||||
}
|
|
||||||
|
|
||||||
_ARRAY_ITEMS_MAPPING = {
|
|
||||||
"list": {"type": "string"},
|
|
||||||
"List[str]": {"type": "string"},
|
|
||||||
"List[int]": {"type": "integer"},
|
|
||||||
"List[float]": {"type": "number"},
|
|
||||||
"List[dict]": {"type": "object"},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def _pythonTypeToJsonType(pythonType: str) -> str:
|
def _catalogTypeToJsonSchema(typeStr: str, _depth: int = 0) -> Dict[str, Any]:
|
||||||
"""Map Python type strings to JSON Schema types."""
|
"""Recursively convert a PORT_TYPE_CATALOG type reference into a JSON Schema fragment.
|
||||||
return _TYPE_MAPPING.get(pythonType, "string")
|
|
||||||
|
|
||||||
|
Supports:
|
||||||
|
- Primitives (str/int/bool/float/Any)
|
||||||
|
- Catalog object schemas (recursively expanded with properties/required)
|
||||||
|
- List[X] (array with typed items)
|
||||||
|
- Dict[K, V] (object with typed additionalProperties)
|
||||||
|
|
||||||
def _pythonTypeToArrayItems(pythonType: str) -> Optional[Dict[str, Any]]:
|
`_depth` guards against pathological recursion in case of a cyclic catalog.
|
||||||
"""Return the JSON Schema `items` descriptor for array types, or None."""
|
"""
|
||||||
return _ARRAY_ITEMS_MAPPING.get(pythonType)
|
from modules.features.graphicalEditor.portTypes import (
|
||||||
|
PORT_TYPE_CATALOG,
|
||||||
|
PRIMITIVE_TYPES,
|
||||||
|
)
|
||||||
|
|
||||||
|
if _depth > 6:
|
||||||
|
return {"type": "object", "description": "(max-depth)"}
|
||||||
|
|
||||||
|
if not typeStr or not isinstance(typeStr, str):
|
||||||
|
return {"type": "string"}
|
||||||
|
|
||||||
|
typeStr = typeStr.strip()
|
||||||
|
|
||||||
|
if typeStr in _PRIMITIVE_JSON_TYPE:
|
||||||
|
return {"type": _PRIMITIVE_JSON_TYPE[typeStr]}
|
||||||
|
if typeStr == "Any":
|
||||||
|
return {}
|
||||||
|
|
||||||
|
if typeStr.startswith("List[") and typeStr.endswith("]"):
|
||||||
|
inner = typeStr[5:-1].strip()
|
||||||
|
return {"type": "array", "items": _catalogTypeToJsonSchema(inner, _depth + 1)}
|
||||||
|
|
||||||
|
if typeStr.startswith("Dict[") and typeStr.endswith("]"):
|
||||||
|
inner = typeStr[5:-1].strip()
|
||||||
|
valueType = "Any"
|
||||||
|
parts = [p.strip() for p in inner.split(",", 1)]
|
||||||
|
if len(parts) == 2:
|
||||||
|
valueType = parts[1]
|
||||||
|
return {
|
||||||
|
"type": "object",
|
||||||
|
"additionalProperties": _catalogTypeToJsonSchema(valueType, _depth + 1),
|
||||||
|
}
|
||||||
|
|
||||||
|
schema = PORT_TYPE_CATALOG.get(typeStr)
|
||||||
|
if schema is not None:
|
||||||
|
props: Dict[str, Any] = {}
|
||||||
|
required: List[str] = []
|
||||||
|
for f in schema.fields:
|
||||||
|
fragment = _catalogTypeToJsonSchema(f.type, _depth + 1)
|
||||||
|
if f.description:
|
||||||
|
fragment["description"] = f.description
|
||||||
|
if f.enumValues:
|
||||||
|
fragment["enum"] = list(f.enumValues)
|
||||||
|
props[f.name] = fragment
|
||||||
|
if f.required:
|
||||||
|
required.append(f.name)
|
||||||
|
out: Dict[str, Any] = {
|
||||||
|
"type": "object",
|
||||||
|
"properties": props,
|
||||||
|
"description": f"PORT_TYPE_CATALOG schema '{schema.name}'",
|
||||||
|
}
|
||||||
|
if required:
|
||||||
|
out["required"] = required
|
||||||
|
return out
|
||||||
|
|
||||||
|
# Lowercase 'list' / 'dict' aliases (legacy, should be eradicated by Phase 2 validator)
|
||||||
|
if typeStr in PRIMITIVE_TYPES and typeStr in {"List", "Dict"}:
|
||||||
|
return {"type": "array" if typeStr == "List" else "object"}
|
||||||
|
|
||||||
|
return {"type": "string", "description": f"unknown type '{typeStr}' (defaulted to string)"}
|
||||||
|
|
||||||
|
|
||||||
def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
|
def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
|
||||||
|
|
|
||||||
|
|
@ -291,6 +291,85 @@ async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult:
|
||||||
return _err(name, str(e))
|
return _err(name, str(e))
|
||||||
|
|
||||||
|
|
||||||
|
async def _list_upstream_paths(params: Dict[str, Any], context: Any) -> ToolResult:
|
||||||
|
"""List pickable upstream DataRef paths for a node (saved workflow graph)."""
|
||||||
|
name = "listUpstreamPaths"
|
||||||
|
try:
|
||||||
|
workflow_id, instance_id = _resolveIds(params, context)
|
||||||
|
node_id = params.get("nodeId")
|
||||||
|
if not workflow_id or not instance_id or not node_id:
|
||||||
|
return _err(name, "workflowId, instanceId, and nodeId required")
|
||||||
|
|
||||||
|
iface = _getInterface(context, instance_id)
|
||||||
|
wf = iface.getWorkflow(workflow_id)
|
||||||
|
if not wf:
|
||||||
|
return _err(name, f"Workflow {workflow_id} not found")
|
||||||
|
|
||||||
|
graph = wf.get("graph", {}) or {}
|
||||||
|
from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
|
||||||
|
|
||||||
|
paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(node_id))
|
||||||
|
return _ok(name, {"paths": paths})
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception("listUpstreamPaths failed: %s", e)
|
||||||
|
return _err(name, str(e))
|
||||||
|
|
||||||
|
|
||||||
|
async def _bind_node_parameter(params: Dict[str, Any], context: Any) -> ToolResult:
|
||||||
|
"""Bind a node parameter to an upstream field via an explicit DataRef."""
|
||||||
|
name = "bindNodeParameter"
|
||||||
|
try:
|
||||||
|
workflow_id, instance_id = _resolveIds(params, context)
|
||||||
|
node_id = params.get("nodeId")
|
||||||
|
param_name = params.get("parameterName")
|
||||||
|
producer_node_id = params.get("producerNodeId")
|
||||||
|
path = params.get("path")
|
||||||
|
if not workflow_id or not instance_id or not node_id or not param_name:
|
||||||
|
return _err(name, "workflowId, instanceId, nodeId, and parameterName required")
|
||||||
|
if not producer_node_id:
|
||||||
|
return _err(name, "producerNodeId required")
|
||||||
|
|
||||||
|
iface = _getInterface(context, instance_id)
|
||||||
|
wf = iface.getWorkflow(workflow_id)
|
||||||
|
if not wf:
|
||||||
|
return _err(name, f"Workflow {workflow_id} not found")
|
||||||
|
|
||||||
|
graph = dict(wf.get("graph", {}) or {})
|
||||||
|
nodes = list(graph.get("nodes", []) or [])
|
||||||
|
found = False
|
||||||
|
ref: Dict[str, Any] = {
|
||||||
|
"type": "ref",
|
||||||
|
"nodeId": str(producer_node_id),
|
||||||
|
"path": list(path) if isinstance(path, (list, tuple)) else [],
|
||||||
|
}
|
||||||
|
exp_type = params.get("expectedType")
|
||||||
|
if exp_type:
|
||||||
|
ref["expectedType"] = str(exp_type)
|
||||||
|
|
||||||
|
for n in nodes:
|
||||||
|
if n.get("id") == node_id:
|
||||||
|
node_params = dict(n.get("parameters", {}) or {})
|
||||||
|
node_params[param_name] = ref
|
||||||
|
n["parameters"] = node_params
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
return _err(name, f"Node {node_id} not found in graph")
|
||||||
|
|
||||||
|
graph["nodes"] = nodes
|
||||||
|
iface.updateWorkflow(workflow_id, {"graph": graph})
|
||||||
|
return _ok(name, {
|
||||||
|
"nodeId": node_id,
|
||||||
|
"parameter": param_name,
|
||||||
|
"dataRef": ref,
|
||||||
|
"message": f"Parameter '{param_name}' bound to upstream {producer_node_id}",
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception("bindNodeParameter failed: %s", e)
|
||||||
|
return _err(name, str(e))
|
||||||
|
|
||||||
|
|
||||||
def _coerceLabel(rawLabel: Any, fallback: str) -> str:
|
def _coerceLabel(rawLabel: Any, fallback: str) -> str:
|
||||||
"""Normalize a node label which may be a string, dict {locale: str}, or other."""
|
"""Normalize a node label which may be a string, dict {locale: str}, or other."""
|
||||||
if isinstance(rawLabel, str):
|
if isinstance(rawLabel, str):
|
||||||
|
|
@ -950,6 +1029,45 @@ def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
|
||||||
},
|
},
|
||||||
"toolSet": TOOLBOX_ID,
|
"toolSet": TOOLBOX_ID,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"name": "listUpstreamPaths",
|
||||||
|
"handler": _list_upstream_paths,
|
||||||
|
"description": (
|
||||||
|
"List pickable upstream paths for binding DataRefs on a node. "
|
||||||
|
"Call after readWorkflowGraph; use with bindNodeParameter instead of relying on implicit wiring."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
**_idFields,
|
||||||
|
"nodeId": {"type": "string", "description": "Target node id (the node whose parameters you will bind)"},
|
||||||
|
},
|
||||||
|
"required": ["nodeId"],
|
||||||
|
},
|
||||||
|
"readOnly": True,
|
||||||
|
"toolSet": TOOLBOX_ID,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "bindNodeParameter",
|
||||||
|
"handler": _bind_node_parameter,
|
||||||
|
"description": (
|
||||||
|
"Bind a parameter to an upstream output using an explicit DataRef "
|
||||||
|
"(producerNodeId + path). Prefer listUpstreamPaths to discover valid paths."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
**_idFields,
|
||||||
|
"nodeId": {"type": "string"},
|
||||||
|
"parameterName": {"type": "string"},
|
||||||
|
"producerNodeId": {"type": "string", "description": "Upstream node id (port 0 producer)"},
|
||||||
|
"path": {"type": "array", "items": {}, "description": "JSON path segments, e.g. [\"documents\"] or [\"id\"]"},
|
||||||
|
"expectedType": {"type": "string", "description": "Optional type hint stored on the ref"},
|
||||||
|
},
|
||||||
|
"required": ["nodeId", "parameterName", "producerNodeId"],
|
||||||
|
},
|
||||||
|
"toolSet": TOOLBOX_ID,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "listAvailableNodeTypes",
|
"name": "listAvailableNodeTypes",
|
||||||
"handler": _listAvailableNodeTypes,
|
"handler": _listAvailableNodeTypes,
|
||||||
|
|
|
||||||
|
|
@ -520,7 +520,28 @@ STOP-ERKENNUNG:
|
||||||
Wenn jemand dich bittet aufzuhoeren, still zu sein, zu stoppen, oder nicht mehr zu reden
|
Wenn jemand dich bittet aufzuhoeren, still zu sein, zu stoppen, oder nicht mehr zu reden
|
||||||
(in JEDER Sprache, z.B. "{botFirstName} stop", "{botFirstName} sei still", "{botFirstName} halt", "{botFirstName} be quiet",
|
(in JEDER Sprache, z.B. "{botFirstName} stop", "{botFirstName} sei still", "{botFirstName} halt", "{botFirstName} be quiet",
|
||||||
"{botFirstName} shut up", "{botFirstName} arrete", etc.), dann setze detectedIntent auf "stop" und
|
"{botFirstName} shut up", "{botFirstName} arrete", etc.), dann setze detectedIntent auf "stop" und
|
||||||
shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt."""
|
shouldRespond auf false. Du musst NICHT antworten wenn jemand dich stoppt.
|
||||||
|
|
||||||
|
AGENT-ESKALATION (needsAgent):
|
||||||
|
Du bist ein SCHNELLER Reflex-Pfad. Fuer komplexe Aufgaben gibt es einen vollwertigen Agent
|
||||||
|
mit Web-Recherche, E-Mail-Versand, Dokumenten-Erzeugung und Datenquellen-Zugriff
|
||||||
|
(SharePoint, Outlook, GoogleDrive etc. via User-Connections).
|
||||||
|
|
||||||
|
Setze "needsAgent": true und "agentReason": "<kurze Beschreibung der Aufgabe in einem Satz>"
|
||||||
|
WENN die Aufgabe eines oder mehrere dieser Merkmale hat:
|
||||||
|
- Recherche im Internet noetig (z.B. "recherchier was im Internet ueber XY", "schau mal nach", "google das")
|
||||||
|
- E-Mail an Teilnehmer/Kontakte versenden
|
||||||
|
- Dokument (PDF, Word, Excel) generieren oder im SharePoint/Drive ablegen
|
||||||
|
- Mehrere Schritte oder Tool-Aufrufe noetig (Zusammenfassung + Versand, Recherche + Empfehlung etc.)
|
||||||
|
- Daten aus externen Quellen abrufen (Outlook-Kontakte, SharePoint-Dateien, Kalender etc.)
|
||||||
|
|
||||||
|
Wenn needsAgent=true:
|
||||||
|
- Setze shouldRespond=false (der Agent uebernimmt; du sprichst NICHT eigenstaendig).
|
||||||
|
- responseText kann eine kurze Bestaetigung sein, wird aber nicht ausgesprochen.
|
||||||
|
- agentReason ist die Aufgabenbeschreibung fuer den Agent (klar, in einer Zeile).
|
||||||
|
|
||||||
|
Wenn die Aufgabe einfach ist (Definition, Wissensfrage aus eigenem Wissen, kurze Meinung,
|
||||||
|
Wiedergabe von vorhandenem Kontext), erledige sie SELBST mit shouldRespond=true und needsAgent=false."""
|
||||||
|
|
||||||
# Append user-configured instructions if provided
|
# Append user-configured instructions if provided
|
||||||
if userSystemPrompt and userSystemPrompt.strip():
|
if userSystemPrompt and userSystemPrompt.strip():
|
||||||
|
|
@ -546,7 +567,9 @@ WICHTIG: Antworte IMMER als valides JSON in exakt diesem Format:
|
||||||
"responseChannels": optional - ["voice"], ["chat"] oder ["voice","chat"] je nach User-Anfrage,
|
"responseChannels": optional - ["voice"], ["chat"] oder ["voice","chat"] je nach User-Anfrage,
|
||||||
"reasoning": "Kurze Begruendung deiner Entscheidung",
|
"reasoning": "Kurze Begruendung deiner Entscheidung",
|
||||||
"detectedIntent": "addressed" | "question" | "proactive" | "stop" | "none",
|
"detectedIntent": "addressed" | "question" | "proactive" | "stop" | "none",
|
||||||
"commands": [] oder null
|
"commands": [] oder null,
|
||||||
|
"needsAgent": false (true nur bei komplexen Aufgaben gemaess Eskalations-Regeln),
|
||||||
|
"agentReason": null (oder kurze Aufgabenbeschreibung wenn needsAgent=true)
|
||||||
}}
|
}}
|
||||||
|
|
||||||
detectedIntent-Werte:
|
detectedIntent-Werte:
|
||||||
|
|
|
||||||
|
|
@ -77,7 +77,11 @@ def _outputSchemaForNode(nodeType: str) -> Optional[str]:
|
||||||
if isinstance(ports, dict):
|
if isinstance(ports, dict):
|
||||||
p0 = ports.get(0) or ports.get("0")
|
p0 = ports.get(0) or ports.get("0")
|
||||||
if isinstance(p0, dict):
|
if isinstance(p0, dict):
|
||||||
return p0.get("schema")
|
spec = p0.get("schema")
|
||||||
|
if isinstance(spec, dict) and spec.get("kind") == "fromGraph":
|
||||||
|
return "FormPayload"
|
||||||
|
if isinstance(spec, str):
|
||||||
|
return spec
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -329,6 +333,15 @@ async def executeGraph(
|
||||||
)
|
)
|
||||||
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
|
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
|
||||||
discoverMethods(services)
|
discoverMethods(services)
|
||||||
|
from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs
|
||||||
|
from modules.workflows.automation2.featureInstanceRefMigration import (
|
||||||
|
materializeFeatureInstanceRefs,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Phase-5 Schicht-4: typed-ref envelopes are materialized FIRST so the
|
||||||
|
# subsequent connection-ref pass and validation see the canonical shape.
|
||||||
|
graph = materializeFeatureInstanceRefs(graph)
|
||||||
|
graph = materializeConnectionRefs(graph)
|
||||||
nodeTypeIds = _getNodeTypeIds(services)
|
nodeTypeIds = _getNodeTypeIds(services)
|
||||||
logger.debug("executeGraph nodeTypeIds (%d): %s", len(nodeTypeIds), sorted(nodeTypeIds))
|
logger.debug("executeGraph nodeTypeIds (%d): %s", len(nodeTypeIds), sorted(nodeTypeIds))
|
||||||
errors = validateGraph(graph, nodeTypeIds)
|
errors = validateGraph(graph, nodeTypeIds)
|
||||||
|
|
|
||||||
|
|
@ -1,19 +1,17 @@
|
||||||
# Copyright (c) 2025 Patrick Motsch
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions.
|
# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions.
|
||||||
#
|
#
|
||||||
# Typed Port System: no heuristic merging. Uses INPUT_EXTRACTORS for wire-handover,
|
# Typed Port System: explicit DataRefs / static parameters only (no runtime wire-handover).
|
||||||
# DataRef for explicit parameter mapping, and _normalizeToSchema for output normalization.
|
# ``materializeConnectionRefs`` (see pickNotPushMigration) may still rewrite empty connectionReference at run start.
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import re
|
import re
|
||||||
from typing import Dict, Any, Optional
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
from modules.features.graphicalEditor.portTypes import (
|
from modules.features.graphicalEditor.portTypes import (
|
||||||
INPUT_EXTRACTORS,
|
|
||||||
_normalizeToSchema,
|
|
||||||
_normalizeError,
|
_normalizeError,
|
||||||
_unwrapTransit,
|
_normalizeToSchema,
|
||||||
)
|
)
|
||||||
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException
|
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException as _SubscriptionInactiveException
|
||||||
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError
|
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import BillingContextError as _BillingContextError
|
||||||
|
|
@ -119,6 +117,63 @@ def _buildSearchQuery(
|
||||||
return " ".join(parts) if parts else "*"
|
return " ".join(parts) if parts else "*"
|
||||||
|
|
||||||
|
|
||||||
|
def _buildConnectionRefDict(connRef: str, chatService, services) -> Optional[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Build {id, authority, label} for node outputs (no secrets).
|
||||||
|
connRef may be UUID or logical connection:authority:user.
|
||||||
|
"""
|
||||||
|
if not connRef or not isinstance(connRef, str):
|
||||||
|
return None
|
||||||
|
original_ref = connRef.strip()
|
||||||
|
ref = original_ref
|
||||||
|
if _isUserConnectionId(ref):
|
||||||
|
resolved = _resolveConnectionIdToReference(chatService, ref, services)
|
||||||
|
if resolved:
|
||||||
|
ref = resolved
|
||||||
|
if not ref.startswith("connection:"):
|
||||||
|
return None
|
||||||
|
parts = ref.split(":", 2)
|
||||||
|
authority = parts[1] if len(parts) > 1 else ""
|
||||||
|
user = parts[2] if len(parts) > 2 else ""
|
||||||
|
label = ref
|
||||||
|
conn_id = ""
|
||||||
|
if chatService:
|
||||||
|
try:
|
||||||
|
for c in chatService.getUserConnections() or []:
|
||||||
|
conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
|
||||||
|
aid = conn.get("authority", "")
|
||||||
|
if hasattr(aid, "value"):
|
||||||
|
aid = aid.value
|
||||||
|
un = conn.get("externalUsername", "") or conn.get("externalId", "") or ""
|
||||||
|
logical = f"connection:{aid}:{un}"
|
||||||
|
if logical == ref or str(conn.get("id")) == original_ref:
|
||||||
|
conn_id = str(conn.get("id", "") or "")
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug("_buildConnectionRefDict: getUserConnections: %s", e)
|
||||||
|
return {"id": conn_id, "authority": authority, "label": label or f"{authority}:{user}"}
|
||||||
|
|
||||||
|
|
||||||
|
def _attachConnectionProvenance(
|
||||||
|
out: Dict[str, Any],
|
||||||
|
resolvedParams: Dict[str, Any],
|
||||||
|
outputSchema: str,
|
||||||
|
chatService,
|
||||||
|
services,
|
||||||
|
) -> None:
|
||||||
|
"""Mutates out to include connection provenance for typed list/draft outputs."""
|
||||||
|
if out.get("connection"):
|
||||||
|
return
|
||||||
|
cref = resolvedParams.get("connectionReference")
|
||||||
|
if not cref:
|
||||||
|
return
|
||||||
|
if outputSchema not in ("FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"):
|
||||||
|
return
|
||||||
|
payload = _buildConnectionRefDict(str(cref), chatService, services)
|
||||||
|
if payload:
|
||||||
|
out["connection"] = payload
|
||||||
|
|
||||||
|
|
||||||
def _resolveConnectionParam(params: Dict, chatService, services) -> None:
|
def _resolveConnectionParam(params: Dict, chatService, services) -> None:
|
||||||
"""Resolve connectionReference if it looks like a UUID (UserConnection.id)."""
|
"""Resolve connectionReference if it looks like a UUID (UserConnection.id)."""
|
||||||
connRef = params.get("connectionReference")
|
connRef = params.get("connectionReference")
|
||||||
|
|
@ -157,45 +212,6 @@ def _applyEmailSearchQuery(params: Dict) -> None:
|
||||||
params.pop(k, None)
|
params.pop(k, None)
|
||||||
|
|
||||||
|
|
||||||
def _wireHandover(nodeDef: Dict, inputSources: Dict, nodeOutputs: Dict, params: Dict) -> None:
|
|
||||||
"""Apply wire-handover: extract fields from upstream using INPUT_EXTRACTORS."""
|
|
||||||
if 0 not in inputSources:
|
|
||||||
logger.debug("_wireHandover: no port 0 in inputSources=%s", inputSources)
|
|
||||||
return
|
|
||||||
srcId, _ = inputSources[0]
|
|
||||||
upstream = nodeOutputs.get(srcId)
|
|
||||||
if not upstream or not isinstance(upstream, dict):
|
|
||||||
logger.debug("_wireHandover: upstream for %s is missing or not dict: %s", srcId, type(upstream))
|
|
||||||
return
|
|
||||||
|
|
||||||
data = _unwrapTransit(upstream)
|
|
||||||
if not isinstance(data, dict):
|
|
||||||
logger.debug("_wireHandover: unwrapped data is not dict: %s", type(data))
|
|
||||||
return
|
|
||||||
|
|
||||||
inputPorts = nodeDef.get("inputPorts", {})
|
|
||||||
port0 = inputPorts.get(0, {})
|
|
||||||
accepts = port0.get("accepts", [])
|
|
||||||
logger.debug("_wireHandover: srcId=%s accepts=%s upstream_keys=%s params_keys_before=%s", srcId, accepts, list(data.keys()), list(params.keys()))
|
|
||||||
|
|
||||||
for schemaName in accepts:
|
|
||||||
if schemaName == "Transit":
|
|
||||||
continue
|
|
||||||
extractor = INPUT_EXTRACTORS.get(schemaName)
|
|
||||||
if extractor:
|
|
||||||
extracted = extractor(data)
|
|
||||||
logger.debug("_wireHandover: extractor %s returned keys=%s", schemaName, list(extracted.keys()) if extracted else None)
|
|
||||||
if extracted:
|
|
||||||
for k, v in extracted.items():
|
|
||||||
existing = params.get(k)
|
|
||||||
if not existing:
|
|
||||||
params[k] = v
|
|
||||||
logger.debug("_wireHandover: set %s (was empty/missing) type=%s len=%s", k, type(v).__name__, len(v) if isinstance(v, (list, str, dict)) else "n/a")
|
|
||||||
else:
|
|
||||||
logger.debug("_wireHandover: skip %s (already has value, type=%s)", k, type(existing).__name__)
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def _getOutputSchemaName(nodeDef: Dict) -> str:
|
def _getOutputSchemaName(nodeDef: Dict) -> str:
|
||||||
"""Get the output schema name from the node definition."""
|
"""Get the output schema name from the node definition."""
|
||||||
outputPorts = nodeDef.get("outputPorts", {})
|
outputPorts = nodeDef.get("outputPorts", {})
|
||||||
|
|
@ -238,22 +254,17 @@ class ActionNodeExecutor:
|
||||||
resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {}))
|
resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {}))
|
||||||
logger.debug("ActionNodeExecutor node %s resolved params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__)
|
logger.debug("ActionNodeExecutor node %s resolved params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__)
|
||||||
|
|
||||||
# 2. Wire-handover via extractors (fills missing params from upstream)
|
# 2. Apply defaults from parameter definitions
|
||||||
inputSources = context.get("inputSources", {}).get(nodeId, {})
|
|
||||||
_wireHandover(nodeDef, inputSources, context.get("nodeOutputs", {}), resolvedParams)
|
|
||||||
logger.debug("ActionNodeExecutor node %s after wireHandover: params keys=%s documentList_present=%s documentList_type=%s", nodeId, list(resolvedParams.keys()), "documentList" in resolvedParams, type(resolvedParams.get("documentList")).__name__)
|
|
||||||
|
|
||||||
# 3. Apply defaults from parameter definitions
|
|
||||||
for pDef in nodeDef.get("parameters", []):
|
for pDef in nodeDef.get("parameters", []):
|
||||||
pName = pDef.get("name")
|
pName = pDef.get("name")
|
||||||
if pName and pName not in resolvedParams and "default" in pDef:
|
if pName and pName not in resolvedParams and "default" in pDef:
|
||||||
resolvedParams[pName] = pDef["default"]
|
resolvedParams[pName] = pDef["default"]
|
||||||
|
|
||||||
# 4. Resolve connectionReference
|
# 3. Resolve connectionReference
|
||||||
chatService = getattr(self.services, "chat", None)
|
chatService = getattr(self.services, "chat", None)
|
||||||
_resolveConnectionParam(resolvedParams, chatService, self.services)
|
_resolveConnectionParam(resolvedParams, chatService, self.services)
|
||||||
|
|
||||||
# 5. Node-type-specific param transformations
|
# 4. Node-type-specific param transformations
|
||||||
if nodeType == "email.checkEmail":
|
if nodeType == "email.checkEmail":
|
||||||
_applyEmailCheckFilter(resolvedParams)
|
_applyEmailCheckFilter(resolvedParams)
|
||||||
elif nodeType == "email.searchEmail":
|
elif nodeType == "email.searchEmail":
|
||||||
|
|
@ -262,7 +273,7 @@ class ActionNodeExecutor:
|
||||||
from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries
|
from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries
|
||||||
merge_clickup_task_update_entries(resolvedParams)
|
merge_clickup_task_update_entries(resolvedParams)
|
||||||
|
|
||||||
# 6. email.checkEmail pause for email wait
|
# 5. email.checkEmail pause for email wait
|
||||||
if nodeType == "email.checkEmail":
|
if nodeType == "email.checkEmail":
|
||||||
runId = context.get("_runId")
|
runId = context.get("_runId")
|
||||||
workflowId = context.get("workflowId")
|
workflowId = context.get("workflowId")
|
||||||
|
|
@ -277,12 +288,12 @@ class ActionNodeExecutor:
|
||||||
}
|
}
|
||||||
raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig)
|
raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig)
|
||||||
|
|
||||||
# 7. AI nodes: normalize legacy "prompt" -> "aiPrompt"
|
# 6. AI nodes: normalize legacy "prompt" -> "aiPrompt"
|
||||||
if nodeType == "ai.prompt":
|
if nodeType == "ai.prompt":
|
||||||
if "aiPrompt" not in resolvedParams and "prompt" in resolvedParams:
|
if "aiPrompt" not in resolvedParams and "prompt" in resolvedParams:
|
||||||
resolvedParams["aiPrompt"] = resolvedParams.pop("prompt")
|
resolvedParams["aiPrompt"] = resolvedParams.pop("prompt")
|
||||||
|
|
||||||
# 8. Build context for email.draftEmail from subject + body
|
# 7. Build context for email.draftEmail from subject + body
|
||||||
if nodeType == "email.draftEmail":
|
if nodeType == "email.draftEmail":
|
||||||
subject = resolvedParams.get("subject", "")
|
subject = resolvedParams.get("subject", "")
|
||||||
body = resolvedParams.get("body", "")
|
body = resolvedParams.get("body", "")
|
||||||
|
|
@ -296,7 +307,7 @@ class ActionNodeExecutor:
|
||||||
resolvedParams.pop("subject", None)
|
resolvedParams.pop("subject", None)
|
||||||
resolvedParams.pop("body", None)
|
resolvedParams.pop("body", None)
|
||||||
|
|
||||||
# 9. Execute action
|
# 8. Execute action
|
||||||
logger.info("ActionNodeExecutor node %s calling %s.%s with %d params", nodeId, methodName, actionName, len(resolvedParams))
|
logger.info("ActionNodeExecutor node %s calling %s.%s with %d params", nodeId, methodName, actionName, len(resolvedParams))
|
||||||
try:
|
try:
|
||||||
executor = ActionExecutor(self.services)
|
executor = ActionExecutor(self.services)
|
||||||
|
|
@ -307,7 +318,7 @@ class ActionNodeExecutor:
|
||||||
logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e)
|
logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e)
|
||||||
return _normalizeError(e, outputSchema)
|
return _normalizeError(e, outputSchema)
|
||||||
|
|
||||||
# 10. Persist generated documents as files and build JSON-safe output
|
# 9. Persist generated documents as files and build JSON-safe output
|
||||||
docsList = []
|
docsList = []
|
||||||
for d in (result.documents or []):
|
for d in (result.documents or []):
|
||||||
dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d
|
dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d
|
||||||
|
|
@ -360,7 +371,6 @@ class ActionNodeExecutor:
|
||||||
"success": result.success,
|
"success": result.success,
|
||||||
"error": result.error,
|
"error": result.error,
|
||||||
"documents": docsList,
|
"documents": docsList,
|
||||||
"documentList": docsList,
|
|
||||||
"data": dataField,
|
"data": dataField,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -396,6 +406,8 @@ class ActionNodeExecutor:
|
||||||
"mode": data_dict.get("mode", resolvedParams.get("mode", "summarize")),
|
"mode": data_dict.get("mode", resolvedParams.get("mode", "summarize")),
|
||||||
"count": int(data_dict.get("count", 0)),
|
"count": int(data_dict.get("count", 0)),
|
||||||
}
|
}
|
||||||
|
_attachConnectionProvenance(cr_out, resolvedParams, outputSchema, chatService, self.services)
|
||||||
return _normalizeToSchema(cr_out, outputSchema)
|
return _normalizeToSchema(cr_out, outputSchema)
|
||||||
|
|
||||||
|
_attachConnectionProvenance(out, resolvedParams, outputSchema, chatService, self.services)
|
||||||
return _normalizeToSchema(out, outputSchema)
|
return _normalizeToSchema(out, outputSchema)
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
# Copyright (c) 2025 Patrick Motsch
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
# Data manipulation node executor: data.aggregate, data.transform, data.filter.
|
# Data manipulation node executor: data.aggregate, data.filter, data.consolidate.
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from typing import Any, Dict
|
from typing import Any, Dict
|
||||||
|
|
@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DataExecutor:
|
class DataExecutor:
|
||||||
"""Execute data.aggregate, data.transform, data.filter nodes."""
|
"""Execute data.aggregate, data.filter, data.consolidate nodes."""
|
||||||
|
|
||||||
async def execute(
|
async def execute(
|
||||||
self,
|
self,
|
||||||
|
|
@ -26,8 +26,6 @@ class DataExecutor:
|
||||||
|
|
||||||
if nodeType == "data.aggregate":
|
if nodeType == "data.aggregate":
|
||||||
return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context)
|
return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context)
|
||||||
if nodeType == "data.transform":
|
|
||||||
return await self._transform(node, nodeOutputs, nodeId, inputSources)
|
|
||||||
if nodeType == "data.filter":
|
if nodeType == "data.filter":
|
||||||
return await self._filter(node, nodeOutputs, nodeId, inputSources)
|
return await self._filter(node, nodeOutputs, nodeId, inputSources)
|
||||||
if nodeType == "data.consolidate":
|
if nodeType == "data.consolidate":
|
||||||
|
|
@ -70,41 +68,6 @@ class DataExecutor:
|
||||||
|
|
||||||
return {"items": items, "count": len(items), "_success": True}
|
return {"items": items, "count": len(items), "_success": True}
|
||||||
|
|
||||||
async def _transform(
|
|
||||||
self,
|
|
||||||
node: Dict,
|
|
||||||
nodeOutputs: Dict,
|
|
||||||
nodeId: str,
|
|
||||||
inputSources: Dict,
|
|
||||||
) -> Any:
|
|
||||||
"""Apply mappings to restructure data."""
|
|
||||||
from modules.workflows.automation2.graphUtils import resolveParameterReferences
|
|
||||||
|
|
||||||
inp = self._getInput(inputSources, nodeOutputs)
|
|
||||||
data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
|
|
||||||
mappings = (node.get("parameters") or {}).get("mappings", [])
|
|
||||||
|
|
||||||
result = {}
|
|
||||||
for mapping in mappings:
|
|
||||||
if not isinstance(mapping, dict):
|
|
||||||
continue
|
|
||||||
outputField = mapping.get("outputField")
|
|
||||||
if not outputField:
|
|
||||||
continue
|
|
||||||
source = mapping.get("source")
|
|
||||||
if source and isinstance(source, dict) and source.get("type") == "ref":
|
|
||||||
resolved = resolveParameterReferences(source, nodeOutputs)
|
|
||||||
result[outputField] = resolved
|
|
||||||
elif source and isinstance(source, dict) and source.get("type") == "value":
|
|
||||||
result[outputField] = source.get("value")
|
|
||||||
elif isinstance(data, dict) and mapping.get("sourceField"):
|
|
||||||
result[outputField] = data.get(mapping["sourceField"])
|
|
||||||
else:
|
|
||||||
result[outputField] = source
|
|
||||||
|
|
||||||
result["_success"] = True
|
|
||||||
return result
|
|
||||||
|
|
||||||
async def _filter(
|
async def _filter(
|
||||||
self,
|
self,
|
||||||
node: Dict,
|
node: Dict,
|
||||||
|
|
|
||||||
159
modules/workflows/automation2/featureInstanceRefMigration.py
Normal file
159
modules/workflows/automation2/featureInstanceRefMigration.py
Normal file
|
|
@ -0,0 +1,159 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Phase-5 Schicht-4 migration: convert raw ``featureInstanceId: "<uuid>"`` workflow
|
||||||
|
parameters into typed ``FeatureInstanceRef`` envelopes on disk.
|
||||||
|
|
||||||
|
Why
|
||||||
|
---
|
||||||
|
The Typed Action Architecture (see
|
||||||
|
``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``) declares
|
||||||
|
``featureInstanceId`` as ``FeatureInstanceRef`` (a catalog-typed reference with
|
||||||
|
a ``featureCode`` discriminator). Older workflows persist this parameter as a
|
||||||
|
plain UUID string, which carries no type information and forces every action /
|
||||||
|
adapter to re-derive the feature code from the node type.
|
||||||
|
|
||||||
|
What this module does
|
||||||
|
---------------------
|
||||||
|
``materializeFeatureInstanceRefs(graph)`` walks every node, and whenever a
|
||||||
|
node parameter named ``featureInstanceId`` is a non-empty string (raw UUID),
|
||||||
|
it rewrites the value to a typed envelope::
|
||||||
|
|
||||||
|
{"$type": "FeatureInstanceRef",
|
||||||
|
"id": "<uuid>",
|
||||||
|
"featureCode": "<derived-from-node-method>"}
|
||||||
|
|
||||||
|
The runtime resolver (``graphUtils._unwrapTypedRef``) automatically unwraps
|
||||||
|
that envelope back to the canonical primitive (``id``) when feeding action
|
||||||
|
implementations, so legacy action code keeps working unchanged.
|
||||||
|
|
||||||
|
Idempotent
|
||||||
|
----------
|
||||||
|
Already-migrated values (already-envelope dicts, empty strings, ``None``) are
|
||||||
|
left untouched. Running the migration twice is a no-op.
|
||||||
|
|
||||||
|
Out of scope
|
||||||
|
------------
|
||||||
|
The runtime helper ``pickNotPushMigration.materializeConnectionRefs`` solves a
|
||||||
|
related but different problem (resolving empty ``connectionReference`` to
|
||||||
|
upstream DataRefs at run-start). Both helpers compose: the typical
|
||||||
|
``executeGraph`` pipeline is
|
||||||
|
|
||||||
|
raw graph
|
||||||
|
-> materializeFeatureInstanceRefs (this module, on save / on load)
|
||||||
|
-> materializeConnectionRefs (pickNotPushMigration, at run-start)
|
||||||
|
-> ActionNodeExecutor / ActionExecutor
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Single source of truth for node-type → feature code mapping. Keep in sync
|
||||||
|
# with the method registry; values must be the same string the FeatureInstance
|
||||||
|
# row uses for its ``featureCode`` column.
|
||||||
|
_NODE_TYPE_PREFIX_TO_FEATURE_CODE: Dict[str, str] = {
|
||||||
|
"trustee": "trustee",
|
||||||
|
"redmine": "redmine",
|
||||||
|
"clickup": "clickup",
|
||||||
|
"sharepoint": "sharepoint",
|
||||||
|
"outlook": "outlook",
|
||||||
|
"email": "outlook",
|
||||||
|
"teamsbot": "teamsbot",
|
||||||
|
"ai": "ai",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _deriveFeatureCode(nodeType: str) -> Optional[str]:
|
||||||
|
"""Best-effort feature-code derivation from a node type id.
|
||||||
|
|
||||||
|
Returns ``None`` if the prefix is not in the registry — the migration then
|
||||||
|
omits ``featureCode`` from the envelope rather than guessing wrongly.
|
||||||
|
"""
|
||||||
|
if not nodeType or not isinstance(nodeType, str):
|
||||||
|
return None
|
||||||
|
prefix = nodeType.split(".", 1)[0].strip().lower()
|
||||||
|
return _NODE_TYPE_PREFIX_TO_FEATURE_CODE.get(prefix)
|
||||||
|
|
||||||
|
|
||||||
|
def _isAlreadyTypedEnvelope(value: Any) -> bool:
|
||||||
|
return (
|
||||||
|
isinstance(value, dict)
|
||||||
|
and value.get("$type") == "FeatureInstanceRef"
|
||||||
|
and isinstance(value.get("id"), str)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _isMigratableUuidValue(value: Any) -> bool:
|
||||||
|
"""A bare non-empty string is treated as a UUID candidate worth migrating.
|
||||||
|
|
||||||
|
We deliberately do NOT enforce a strict UUID regex — historically
|
||||||
|
workflows have been seen with non-UUID instance ids (e.g. demo seeds).
|
||||||
|
The migration converts whatever string is there; downstream code already
|
||||||
|
treats the value as opaque.
|
||||||
|
"""
|
||||||
|
return isinstance(value, str) and value.strip() != ""
|
||||||
|
|
||||||
|
|
||||||
|
def _buildEnvelope(uuidValue: str, nodeType: str) -> Dict[str, Any]:
|
||||||
|
envelope: Dict[str, Any] = {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": uuidValue.strip(),
|
||||||
|
}
|
||||||
|
code = _deriveFeatureCode(nodeType)
|
||||||
|
if code:
|
||||||
|
envelope["featureCode"] = code
|
||||||
|
return envelope
|
||||||
|
|
||||||
|
|
||||||
|
def materializeFeatureInstanceRefs(graph: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
"""Return a deep-copied graph with raw ``featureInstanceId`` strings rewritten
|
||||||
|
to typed ``FeatureInstanceRef`` envelopes.
|
||||||
|
|
||||||
|
The function never mutates its input. It is safe to call repeatedly
|
||||||
|
(idempotent) and on partial graphs (missing nodes, missing parameters).
|
||||||
|
"""
|
||||||
|
if not isinstance(graph, dict):
|
||||||
|
return graph
|
||||||
|
|
||||||
|
out = copy.deepcopy(graph)
|
||||||
|
nodes = out.get("nodes")
|
||||||
|
if not isinstance(nodes, list):
|
||||||
|
return out
|
||||||
|
|
||||||
|
migratedCount = 0
|
||||||
|
for node in nodes:
|
||||||
|
if not isinstance(node, dict):
|
||||||
|
continue
|
||||||
|
params = node.get("parameters")
|
||||||
|
if not isinstance(params, dict):
|
||||||
|
continue
|
||||||
|
current = params.get("featureInstanceId")
|
||||||
|
if current is None:
|
||||||
|
continue
|
||||||
|
if _isAlreadyTypedEnvelope(current):
|
||||||
|
continue
|
||||||
|
if not _isMigratableUuidValue(current):
|
||||||
|
continue
|
||||||
|
envelope = _buildEnvelope(current, node.get("type") or "")
|
||||||
|
params["featureInstanceId"] = envelope
|
||||||
|
migratedCount += 1
|
||||||
|
logger.debug(
|
||||||
|
"materializeFeatureInstanceRefs: node %s (%s) -> envelope %r",
|
||||||
|
node.get("id"),
|
||||||
|
node.get("type"),
|
||||||
|
envelope,
|
||||||
|
)
|
||||||
|
|
||||||
|
if migratedCount:
|
||||||
|
logger.info(
|
||||||
|
"materializeFeatureInstanceRefs: migrated %d featureInstanceId value(s)",
|
||||||
|
migratedCount,
|
||||||
|
)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ["materializeFeatureInstanceRefs"]
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
# Graph parsing, validation, and topological sort for automation2.
|
# Graph parsing, validation, and topological sort for automation2.
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from typing import Dict, List, Any, Tuple, Set
|
from typing import Dict, List, Any, Tuple, Set, Optional
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
@ -113,10 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
|
||||||
if nid not in nodeIds:
|
if nid not in nodeIds:
|
||||||
errors.append(f"Connection references non-existent node {nid}")
|
errors.append(f"Connection references non-existent node {nid}")
|
||||||
|
|
||||||
# Soft port compatibility check (warnings, not errors)
|
# Port compatibility: hard-fail (Pick-not-Push typed graph)
|
||||||
warnings = _checkPortCompatibility(nodes, connMap)
|
port_errors = _checkPortCompatibility(nodes, connMap)
|
||||||
if warnings:
|
if port_errors:
|
||||||
logger.info("validateGraph port warnings: %s", warnings)
|
logger.warning("validateGraph port mismatches: %s", port_errors)
|
||||||
|
errors.extend(port_errors)
|
||||||
|
|
||||||
if errors:
|
if errors:
|
||||||
logger.debug("validateGraph errors: %s", errors)
|
logger.debug("validateGraph errors: %s", errors)
|
||||||
|
|
@ -125,19 +126,35 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
|
||||||
return errors
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
def parse_graph_defined_schema(node: Dict[str, Any], parameter_key: str) -> Optional[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Build a JSON-serializable port schema dict from graph parameters (e.g. form ``fields``).
|
||||||
|
Used by tooling and future API surfaces; mirrors ``parse_graph_defined_output_schema`` logic.
|
||||||
|
"""
|
||||||
|
from modules.features.graphicalEditor.portTypes import _derive_form_payload_schema_from_param
|
||||||
|
|
||||||
|
sch = _derive_form_payload_schema_from_param(node, parameter_key)
|
||||||
|
if sch is None:
|
||||||
|
return None
|
||||||
|
return {
|
||||||
|
"name": sch.name,
|
||||||
|
"fields": [f.model_dump() for f in sch.fields],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def _checkPortCompatibility(
|
def _checkPortCompatibility(
|
||||||
nodes: List[Dict],
|
nodes: List[Dict],
|
||||||
connMap: Dict[str, List[Tuple[str, int, int]]],
|
connMap: Dict[str, List[Tuple[str, int, int]]],
|
||||||
) -> List[str]:
|
) -> List[str]:
|
||||||
"""
|
"""
|
||||||
Soft check: warn if connected port types are incompatible.
|
Hard typed-port check: incompatible connections become validation errors.
|
||||||
Returns warnings (never blocks execution).
|
|
||||||
"""
|
"""
|
||||||
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
from modules.features.graphicalEditor.portTypes import resolve_output_schema_name
|
||||||
|
|
||||||
nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES}
|
nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES}
|
||||||
nodeById = {n["id"]: n for n in nodes if n.get("id")}
|
nodeById = {n["id"]: n for n in nodes if n.get("id")}
|
||||||
warnings = []
|
warnings: List[str] = []
|
||||||
|
|
||||||
for tgt, pairs in connMap.items():
|
for tgt, pairs in connMap.items():
|
||||||
tgtNode = nodeById.get(tgt)
|
tgtNode = nodeById.get(tgt)
|
||||||
|
|
@ -156,19 +173,27 @@ def _checkPortCompatibility(
|
||||||
if not srcDef:
|
if not srcDef:
|
||||||
continue
|
continue
|
||||||
srcOutputPorts = srcDef.get("outputPorts", {})
|
srcOutputPorts = srcDef.get("outputPorts", {})
|
||||||
srcPort = srcOutputPorts.get(srcOut, {})
|
srcPort = srcOutputPorts.get(srcOut, {}) or {}
|
||||||
tgtPort = tgtInputPorts.get(tgtIn, {})
|
tgtPort = tgtInputPorts.get(tgtIn, {}) or {}
|
||||||
|
|
||||||
srcSchema = srcPort.get("schema", "")
|
if not isinstance(srcPort, dict):
|
||||||
|
continue
|
||||||
|
src_schema = resolve_output_schema_name(srcNode, srcPort)
|
||||||
accepts = tgtPort.get("accepts", [])
|
accepts = tgtPort.get("accepts", [])
|
||||||
|
|
||||||
if not accepts or not srcSchema:
|
if not accepts or not src_schema:
|
||||||
continue
|
continue
|
||||||
if "Transit" in accepts:
|
if src_schema in accepts:
|
||||||
|
continue
|
||||||
|
# Port that only declares Transit behaves as an untyped sink (legacy graphs).
|
||||||
|
if len(accepts) == 1 and accepts[0] == "Transit":
|
||||||
|
continue
|
||||||
|
if src_schema == "FormPayload_dynamic" and "FormPayload" in accepts:
|
||||||
|
continue
|
||||||
|
if src_schema.startswith("FormPayload") and "FormPayload" in accepts:
|
||||||
continue
|
continue
|
||||||
if srcSchema not in accepts:
|
|
||||||
warnings.append(
|
warnings.append(
|
||||||
f"Port mismatch: {src}[out:{srcOut}] ({srcSchema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})"
|
f"Port mismatch: {src}[out:{srcOut}] ({src_schema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})"
|
||||||
)
|
)
|
||||||
|
|
||||||
return warnings
|
return warnings
|
||||||
|
|
@ -217,12 +242,35 @@ def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, in
|
||||||
return order
|
return order
|
||||||
|
|
||||||
|
|
||||||
|
_WILDCARD_SEGMENT = "*"
|
||||||
|
|
||||||
|
|
||||||
def _get_by_path(data: Any, path: List[Any]) -> Any:
|
def _get_by_path(data: Any, path: List[Any]) -> Any:
|
||||||
"""Traverse data by path (strings and ints); return None if not found."""
|
"""Traverse data by path (strings and ints); return None if not found.
|
||||||
|
|
||||||
|
Supports the iteration wildcard ``"*"`` as a path segment: when applied
|
||||||
|
to a list, the remainder of the path is mapped over each element and the
|
||||||
|
results are returned as a list (drops elements that resolve to ``None``).
|
||||||
|
This is the "typed Bindings-Resolver" iteration primitive defined for
|
||||||
|
Schicht 4 of the Typed Action Architecture.
|
||||||
|
"""
|
||||||
current = data
|
current = data
|
||||||
for seg in path:
|
for i, seg in enumerate(path):
|
||||||
if current is None:
|
if current is None:
|
||||||
return None
|
return None
|
||||||
|
if isinstance(seg, str) and seg == _WILDCARD_SEGMENT:
|
||||||
|
if not isinstance(current, (list, tuple)):
|
||||||
|
return None
|
||||||
|
tail = list(path[i + 1 :])
|
||||||
|
if not tail:
|
||||||
|
return list(current)
|
||||||
|
mapped: List[Any] = []
|
||||||
|
for item in current:
|
||||||
|
resolved = _get_by_path(item, tail)
|
||||||
|
if resolved is None:
|
||||||
|
continue
|
||||||
|
mapped.append(resolved)
|
||||||
|
return mapped
|
||||||
if isinstance(current, dict) and isinstance(seg, str) and seg in current:
|
if isinstance(current, dict) and isinstance(seg, str) and seg in current:
|
||||||
current = current[seg]
|
current = current[seg]
|
||||||
elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)):
|
elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)):
|
||||||
|
|
@ -236,6 +284,52 @@ def _get_by_path(data: Any, path: List[Any]) -> Any:
|
||||||
return current
|
return current
|
||||||
|
|
||||||
|
|
||||||
|
def _pathContainsWildcard(path: List[Any]) -> bool:
|
||||||
|
"""True if any segment is the iteration wildcard ``"*"``."""
|
||||||
|
return any(isinstance(seg, str) and seg == _WILDCARD_SEGMENT for seg in path)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Phase-5 Schicht-4 — Typed-Ref envelope unwrap
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
#
|
||||||
|
# Workflow params can carry a typed-ref envelope like
|
||||||
|
# ``{"$type": "FeatureInstanceRef", "id": "<uuid>", "featureCode": "trustee"}``.
|
||||||
|
# Action implementations historically receive the canonical primitive (the
|
||||||
|
# referenced ``id``) as a string. ``_unwrapTypedRef`` extracts that primitive
|
||||||
|
# without losing the typed envelope shape on disk — the migration script
|
||||||
|
# (``featureInstanceRefMigration.materializeFeatureInstanceRefs``) writes the
|
||||||
|
# envelope, the resolver unwraps it on its way to the action.
|
||||||
|
|
||||||
|
_TYPED_REF_PRIMARY_FIELD = {
|
||||||
|
"FeatureInstanceRef": "id",
|
||||||
|
"ConnectionRef": "id",
|
||||||
|
"PromptTemplateRef": "id",
|
||||||
|
"ClickUpListRef": "listId",
|
||||||
|
"SharePointFileRef": "filePath",
|
||||||
|
"SharePointFolderRef": "folderPath",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _isTypedRefEnvelope(value: Any) -> bool:
|
||||||
|
"""True if ``value`` looks like a typed-ref envelope ({\"$type\": \"<CatalogType>\", ...})."""
|
||||||
|
if not isinstance(value, dict):
|
||||||
|
return False
|
||||||
|
typeName = value.get("$type")
|
||||||
|
return isinstance(typeName, str) and typeName in _TYPED_REF_PRIMARY_FIELD
|
||||||
|
|
||||||
|
|
||||||
|
def _unwrapTypedRef(value: Any) -> Any:
|
||||||
|
"""If ``value`` is a typed-ref envelope, return its primary primitive.
|
||||||
|
|
||||||
|
Falls back to the original value for unknown / non-envelope inputs.
|
||||||
|
"""
|
||||||
|
if not _isTypedRefEnvelope(value):
|
||||||
|
return value
|
||||||
|
primary = _TYPED_REF_PRIMARY_FIELD[value["$type"]]
|
||||||
|
return value.get(primary, value)
|
||||||
|
|
||||||
|
|
||||||
def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
|
def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
|
||||||
"""
|
"""
|
||||||
Resolve parameter references:
|
Resolve parameter references:
|
||||||
|
|
@ -247,6 +341,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
|
||||||
import re
|
import re
|
||||||
|
|
||||||
if isinstance(value, dict):
|
if isinstance(value, dict):
|
||||||
|
# Phase-5 Schicht-4: typed-ref envelopes (FeatureInstanceRef etc.) on
|
||||||
|
# disk get unwrapped to their canonical primitive (e.g. ``id``) so
|
||||||
|
# legacy action signatures keep working. See ``_unwrapTypedRef``.
|
||||||
|
if _isTypedRefEnvelope(value):
|
||||||
|
return _unwrapTypedRef(value)
|
||||||
if value.get("type") == "ref":
|
if value.get("type") == "ref":
|
||||||
node_id = value.get("nodeId")
|
node_id = value.get("nodeId")
|
||||||
path = value.get("path")
|
path = value.get("path")
|
||||||
|
|
|
||||||
83
modules/workflows/automation2/pickNotPushMigration.py
Normal file
83
modules/workflows/automation2/pickNotPushMigration.py
Normal file
|
|
@ -0,0 +1,83 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Graph helpers for Pick-not-Push: materialize connectionReference as explicit DataRefs.
|
||||||
|
|
||||||
|
Runtime: executeGraph deep-copies the version graph and applies materialize_connection_refs
|
||||||
|
so downstream nodes resolve connection UUIDs from upstream output.connection.id.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, List
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
from modules.features.graphicalEditor.portTypes import resolve_output_schema_name
|
||||||
|
from modules.workflows.automation2.graphUtils import buildConnectionMap, getInputSources
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
_NODE_DEF_BY_ID = {n["id"]: n for n in STATIC_NODE_TYPES}
|
||||||
|
|
||||||
|
_SCHEMAS_WITH_CONNECTION = frozenset(
|
||||||
|
{"FileList", "DocumentList", "EmailList", "TaskList", "EmailDraft", "UdmDocument"},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _data_ref(node_id: str, path: List[Any]) -> Dict[str, Any]:
|
||||||
|
return {"type": "ref", "nodeId": node_id, "path": list(path)}
|
||||||
|
|
||||||
|
|
||||||
|
def materializeConnectionRefs(graph: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Deep-copy graph and set empty connectionReference (userConnection params) to
|
||||||
|
DataRef { nodeId: upstreamPort0, path: ['connection','id'] } when upstream
|
||||||
|
output schema carries connection provenance.
|
||||||
|
"""
|
||||||
|
g = copy.deepcopy(graph)
|
||||||
|
nodes: List[Dict[str, Any]] = g.get("nodes") or []
|
||||||
|
connections = g.get("connections") or []
|
||||||
|
if not nodes:
|
||||||
|
return g
|
||||||
|
|
||||||
|
conn_map = buildConnectionMap(connections)
|
||||||
|
node_by_id = {n["id"]: n for n in nodes if n.get("id")}
|
||||||
|
|
||||||
|
for node in nodes:
|
||||||
|
nid = node.get("id")
|
||||||
|
ntype = node.get("type")
|
||||||
|
if not nid or not ntype:
|
||||||
|
continue
|
||||||
|
node_def = _NODE_DEF_BY_ID.get(ntype)
|
||||||
|
if not node_def:
|
||||||
|
continue
|
||||||
|
pdefs = node_def.get("parameters") or []
|
||||||
|
has_conn = any(
|
||||||
|
p.get("name") == "connectionReference" and p.get("frontendType") == "userConnection"
|
||||||
|
for p in pdefs
|
||||||
|
)
|
||||||
|
if not has_conn:
|
||||||
|
continue
|
||||||
|
params = node.get("parameters")
|
||||||
|
if not isinstance(params, dict):
|
||||||
|
node["parameters"] = {}
|
||||||
|
params = node["parameters"]
|
||||||
|
cur = params.get("connectionReference")
|
||||||
|
if cur not in (None, "", {}):
|
||||||
|
continue
|
||||||
|
input_sources = getInputSources(nid, conn_map)
|
||||||
|
if 0 not in input_sources:
|
||||||
|
continue
|
||||||
|
src_id, _ = input_sources[0]
|
||||||
|
src_node = node_by_id.get(src_id) or {}
|
||||||
|
src_def = _NODE_DEF_BY_ID.get(src_node.get("type") or "")
|
||||||
|
if not src_def:
|
||||||
|
continue
|
||||||
|
out_port = (src_def.get("outputPorts") or {}).get(0, {}) or {}
|
||||||
|
out_schema = resolve_output_schema_name(src_node, out_port if isinstance(out_port, dict) else {})
|
||||||
|
if out_schema not in _SCHEMAS_WITH_CONNECTION:
|
||||||
|
continue
|
||||||
|
params["connectionReference"] = _data_ref(src_id, ["connection", "id"])
|
||||||
|
logger.debug("materializeConnectionRefs: %s.connectionReference -> ref %s.connection.id", nid, src_id)
|
||||||
|
|
||||||
|
return g
|
||||||
36
modules/workflows/automation2/udmUpstreamShapes.py
Normal file
36
modules/workflows/automation2/udmUpstreamShapes.py
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Pure shape coercion for UDM-related upstream payloads (tests + optional tooling).
|
||||||
|
|
||||||
|
No runtime wire-handover — kept only so unit tests can assert stable normalisation rules.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
|
||||||
|
def _coerceUdmDocumentInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
if upstream.get("children") is not None and upstream.get("sourceType"):
|
||||||
|
return upstream
|
||||||
|
udm = upstream.get("udm")
|
||||||
|
if isinstance(udm, dict) and udm.get("children") is not None:
|
||||||
|
return udm
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _coerceUdmNodeListInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
nodes = upstream.get("nodes")
|
||||||
|
if isinstance(nodes, list):
|
||||||
|
return {"nodes": nodes, "count": len(nodes)}
|
||||||
|
children = upstream.get("children")
|
||||||
|
if isinstance(children, list):
|
||||||
|
return {"nodes": children, "count": len(children)}
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _coerceConsolidateResultInput(upstream: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
result: Dict[str, Any] = {}
|
||||||
|
for key in ("result", "mode", "count"):
|
||||||
|
if key in upstream:
|
||||||
|
result[key] = upstream[key]
|
||||||
|
return result
|
||||||
177
modules/workflows/methods/_actionSignatureValidator.py
Normal file
177
modules/workflows/methods/_actionSignatureValidator.py
Normal file
|
|
@ -0,0 +1,177 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Action signature validator for the Typed Action Architecture (Phase 2).
|
||||||
|
|
||||||
|
Verifies that every WorkflowActionDefinition exposed by a Method:
|
||||||
|
1. Declares a parameter `type` that is either a primitive or a known
|
||||||
|
PORT_TYPE_CATALOG schema name.
|
||||||
|
2. Declares an `outputType` that exists in PORT_TYPE_CATALOG.
|
||||||
|
3. Declares container types (`List[X]`, `Dict[K,V]`) whose element types
|
||||||
|
are also primitives or catalog schemas.
|
||||||
|
|
||||||
|
Used at startup (and in CI tests) to prevent silent drift between
|
||||||
|
backend method signatures and the type catalog.
|
||||||
|
|
||||||
|
Plan: wiki/c-work/1-plan/2026-04-typed-action-architecture.md
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Dict, Iterable, List, Optional
|
||||||
|
|
||||||
|
from modules.datamodels.datamodelWorkflowActions import (
|
||||||
|
WorkflowActionDefinition,
|
||||||
|
WorkflowActionParameter,
|
||||||
|
)
|
||||||
|
from modules.features.graphicalEditor.portTypes import (
|
||||||
|
PORT_TYPE_CATALOG,
|
||||||
|
PRIMITIVE_TYPES,
|
||||||
|
_stripContainer,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Catalog types that are explicitly allowed as fire-and-forget outputs
|
||||||
|
# (no typed payload expected by downstream nodes).
|
||||||
|
_ALLOWED_GENERIC_OUTPUTS = frozenset({"ActionResult", "Transit"})
|
||||||
|
|
||||||
|
|
||||||
|
def _isKnownType(typeName: str) -> bool:
|
||||||
|
"""Primitive or catalog-resolvable type name."""
|
||||||
|
return typeName in PRIMITIVE_TYPES or typeName in PORT_TYPE_CATALOG
|
||||||
|
|
||||||
|
|
||||||
|
def _validateTypeRef(typeStr: str) -> List[str]:
|
||||||
|
"""
|
||||||
|
Validate a single type reference string (the value of `type` on a
|
||||||
|
WorkflowActionParameter or `outputType` on a WorkflowActionDefinition).
|
||||||
|
|
||||||
|
Returns a list of human-readable error fragments (empty if OK).
|
||||||
|
"""
|
||||||
|
if not typeStr or not isinstance(typeStr, str):
|
||||||
|
return ["empty/non-string type"]
|
||||||
|
|
||||||
|
# Backwards-compatible aliases (lowercase Python builtins)
|
||||||
|
if typeStr in {"list", "dict"}:
|
||||||
|
return [
|
||||||
|
f"'{typeStr}' is too generic — use 'List[X]' / 'Dict[K,V]' or a "
|
||||||
|
f"catalog schema name"
|
||||||
|
]
|
||||||
|
|
||||||
|
parts = _stripContainer(typeStr)
|
||||||
|
if not parts:
|
||||||
|
return [f"could not parse type '{typeStr}'"]
|
||||||
|
|
||||||
|
errors: List[str] = []
|
||||||
|
for part in parts:
|
||||||
|
if not _isKnownType(part):
|
||||||
|
errors.append(
|
||||||
|
f"unknown type '{part}' (not a primitive and not in catalog)"
|
||||||
|
)
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
def _validateActionParameter(
|
||||||
|
actionId: str,
|
||||||
|
paramName: str,
|
||||||
|
param: WorkflowActionParameter,
|
||||||
|
) -> List[str]:
|
||||||
|
"""Validate a single parameter; returns prefixed error messages."""
|
||||||
|
out: List[str] = []
|
||||||
|
for err in _validateTypeRef(param.type):
|
||||||
|
out.append(f"{actionId}.{paramName}: {err}")
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def _validateActionDefinition(
|
||||||
|
actionDef: WorkflowActionDefinition,
|
||||||
|
) -> List[str]:
|
||||||
|
"""Validate parameters + outputType of one action."""
|
||||||
|
errors: List[str] = []
|
||||||
|
actionId = actionDef.actionId or "<no-actionId>"
|
||||||
|
|
||||||
|
for paramName, param in (actionDef.parameters or {}).items():
|
||||||
|
errors.extend(_validateActionParameter(actionId, paramName, param))
|
||||||
|
|
||||||
|
outputType = actionDef.outputType
|
||||||
|
if outputType not in _ALLOWED_GENERIC_OUTPUTS:
|
||||||
|
for err in _validateTypeRef(outputType):
|
||||||
|
errors.append(f"{actionId}.<outputType>: {err}")
|
||||||
|
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
def _validateActionsDict(
|
||||||
|
methodName: str,
|
||||||
|
actions: Dict[str, WorkflowActionDefinition],
|
||||||
|
) -> List[str]:
|
||||||
|
"""Validate every action in a Method's _actions dict."""
|
||||||
|
errors: List[str] = []
|
||||||
|
if not actions:
|
||||||
|
return errors
|
||||||
|
for localName, actionDef in actions.items():
|
||||||
|
if not isinstance(actionDef, WorkflowActionDefinition):
|
||||||
|
errors.append(
|
||||||
|
f"{methodName}.{localName}: not a WorkflowActionDefinition instance"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
errors.extend(_validateActionDefinition(actionDef))
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Public entry points
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _validateMethods(methodInstances: Iterable) -> List[str]:
|
||||||
|
"""
|
||||||
|
Validate a sequence of Method instances.
|
||||||
|
|
||||||
|
Each instance is expected to expose `_actions` (Dict[str, WorkflowActionDefinition]).
|
||||||
|
"""
|
||||||
|
errors: List[str] = []
|
||||||
|
for method in methodInstances:
|
||||||
|
methodName = getattr(method, "name", method.__class__.__name__)
|
||||||
|
actions = getattr(method, "_actions", None) or {}
|
||||||
|
errors.extend(_validateActionsDict(methodName, actions))
|
||||||
|
return errors
|
||||||
|
|
||||||
|
|
||||||
|
def _formatValidationReport(errors: List[str]) -> str:
|
||||||
|
"""Build a multi-line human-readable error report."""
|
||||||
|
if not errors:
|
||||||
|
return "Action signatures are healthy."
|
||||||
|
lines = [f"Found {len(errors)} action-signature drift(s):"]
|
||||||
|
lines.extend(f" - {e}" for e in errors)
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def _logValidationReport(errors: List[str], strict: bool = False) -> None:
|
||||||
|
"""
|
||||||
|
Log validation results.
|
||||||
|
|
||||||
|
If `strict=True`, raises RuntimeError on any error (use in tests / CI).
|
||||||
|
Otherwise emits warnings (use at startup so the app keeps running but
|
||||||
|
operators see the drift in the log).
|
||||||
|
"""
|
||||||
|
report = _formatValidationReport(errors)
|
||||||
|
if errors:
|
||||||
|
if strict:
|
||||||
|
raise RuntimeError(report)
|
||||||
|
logger.warning(report)
|
||||||
|
else:
|
||||||
|
logger.info(report)
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"_validateMethods",
|
||||||
|
"_validateActionsDict",
|
||||||
|
"_validateActionDefinition",
|
||||||
|
"_validateActionParameter",
|
||||||
|
"_validateTypeRef",
|
||||||
|
"_formatValidationReport",
|
||||||
|
"_logValidationReport",
|
||||||
|
]
|
||||||
|
|
@ -39,17 +39,19 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.process",
|
actionId="ai.process",
|
||||||
description="Universal AI document processing action - accepts multiple input documents in any format and processes them together with a prompt. If the prompt specifies document formats to deliver, include them in the prompt",
|
description="Universal AI document processing action - accepts multiple input documents in any format and processes them together with a prompt. If the prompt specifies document formats to deliver, include them in the prompt",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="AiResult",
|
||||||
parameters={
|
parameters={
|
||||||
"aiPrompt": WorkflowActionParameter(
|
"aiPrompt": WorkflowActionParameter(
|
||||||
name="aiPrompt",
|
name="aiPrompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=True,
|
required=True,
|
||||||
description="Instruction for the AI describing what processing to perform"
|
description="Instruction for the AI describing what processing to perform"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document reference(s) in any format to use as input/context"
|
description="Document reference(s) in any format to use as input/context"
|
||||||
|
|
@ -82,7 +84,7 @@ class MethodAi(MethodBase):
|
||||||
),
|
),
|
||||||
"contentParts": WorkflowActionParameter(
|
"contentParts": WorkflowActionParameter(
|
||||||
name="contentParts",
|
name="contentParts",
|
||||||
type="List[ContentPart]",
|
type="List[Any]",
|
||||||
frontendType=FrontendType.HIDDEN,
|
frontendType=FrontendType.HIDDEN,
|
||||||
required=False,
|
required=False,
|
||||||
description="Pre-extracted content parts (internal parameter, typically passed between actions). If provided, these will be used instead of extracting from documentList. Can be a list of ContentPart objects or an object with a 'parts' attribute."
|
description="Pre-extracted content parts (internal parameter, typically passed between actions). If provided, these will be used instead of extracting from documentList. Can be a list of ContentPart objects or an object with a 'parts' attribute."
|
||||||
|
|
@ -94,10 +96,12 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.webResearch",
|
actionId="ai.webResearch",
|
||||||
description="Web research with two-step process: search for URLs, then crawl content",
|
description="Web research with two-step process: search for URLs, then crawl content",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="AiResult",
|
||||||
parameters={
|
parameters={
|
||||||
"prompt": WorkflowActionParameter(
|
"prompt": WorkflowActionParameter(
|
||||||
name="prompt",
|
name="prompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=True,
|
required=True,
|
||||||
description="Natural language research instruction"
|
description="Natural language research instruction"
|
||||||
|
|
@ -140,10 +144,11 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.summarizeDocument",
|
actionId="ai.summarizeDocument",
|
||||||
description="Summarize one or more documents, extracting key points and main ideas. If the prompt specifies document formats to deliver, include them in the prompt",
|
description="Summarize one or more documents, extracting key points and main ideas. If the prompt specifies document formats to deliver, include them in the prompt",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to summarize"
|
description="Document reference(s) to summarize"
|
||||||
|
|
@ -180,10 +185,11 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.translateDocument",
|
actionId="ai.translateDocument",
|
||||||
description="Translate documents to a target language while preserving formatting and structure",
|
description="Translate documents to a target language while preserving formatting and structure",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to translate"
|
description="Document reference(s) to translate"
|
||||||
|
|
@ -224,10 +230,11 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.convertDocument",
|
actionId="ai.convertDocument",
|
||||||
description="Convert documents between different formats (PDF→Word, Excel→CSV, etc.)",
|
description="Convert documents between different formats (PDF→Word, Excel→CSV, etc.)",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to convert"
|
description="Document reference(s) to convert"
|
||||||
|
|
@ -255,17 +262,19 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.generateDocument",
|
actionId="ai.generateDocument",
|
||||||
description="Generate documents from scratch or based on templates/inputs. If the prompt specifies document formats to deliver, include them in the prompt",
|
description="Generate documents from scratch or based on templates/inputs. If the prompt specifies document formats to deliver, include them in the prompt",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"prompt": WorkflowActionParameter(
|
"prompt": WorkflowActionParameter(
|
||||||
name="prompt",
|
name="prompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=True,
|
required=True,
|
||||||
description="Description of the document to generate"
|
description="Description of the document to generate"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Template documents or reference documents to use as a guide"
|
description="Template documents or reference documents to use as a guide"
|
||||||
|
|
@ -293,17 +302,19 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.generateCode",
|
actionId="ai.generateCode",
|
||||||
description="Generate one or multiple code files in a single action - explicitly sets intent to 'code'. This action can generate multiple files (e.g., config.json, customers.json, settings.json) when the prompt requests multiple files. If the prompt specifies file formats to deliver, include them in the prompt. IMPORTANT: When the user requests multiple files (e.g., 'generate 3 JSON files'), use a SINGLE ai.generateCode action with a prompt that describes ALL requested files, rather than splitting into multiple actions.",
|
description="Generate one or multiple code files in a single action - explicitly sets intent to 'code'. This action can generate multiple files (e.g., config.json, customers.json, settings.json) when the prompt requests multiple files. If the prompt specifies file formats to deliver, include them in the prompt. IMPORTANT: When the user requests multiple files (e.g., 'generate 3 JSON files'), use a SINGLE ai.generateCode action with a prompt that describes ALL requested files, rather than splitting into multiple actions.",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"prompt": WorkflowActionParameter(
|
"prompt": WorkflowActionParameter(
|
||||||
name="prompt",
|
name="prompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=True,
|
required=True,
|
||||||
description="Description of code to generate. If multiple files are requested, describe ALL files in this single prompt (e.g., 'Generate 3 JSON files: 1) config.json with..., 2) customers.json with..., 3) settings.json with...')."
|
description="Description of code to generate. If multiple files are requested, describe ALL files in this single prompt (e.g., 'Generate 3 JSON files: 1) config.json with..., 2) customers.json with..., 3) settings.json with...')."
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Reference documents"
|
description="Reference documents"
|
||||||
|
|
@ -323,6 +334,7 @@ class MethodAi(MethodBase):
|
||||||
actionId="ai.consolidate",
|
actionId="ai.consolidate",
|
||||||
description="AI-assisted consolidation of aggregated workflow results (summarize, classify, semantic merge)",
|
description="AI-assisted consolidation of aggregated workflow results (summarize, classify, semantic merge)",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ConsolidateResult",
|
||||||
parameters={
|
parameters={
|
||||||
"mode": WorkflowActionParameter(
|
"mode": WorkflowActionParameter(
|
||||||
name="mode",
|
name="mode",
|
||||||
|
|
@ -336,6 +348,7 @@ class MethodAi(MethodBase):
|
||||||
"prompt": WorkflowActionParameter(
|
"prompt": WorkflowActionParameter(
|
||||||
name="prompt",
|
name="prompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=False,
|
required=False,
|
||||||
description="Optional extra instructions for the LLM",
|
description="Optional extra instructions for the LLM",
|
||||||
|
|
|
||||||
|
|
@ -176,6 +176,7 @@ class MethodBase:
|
||||||
'default': param.default,
|
'default': param.default,
|
||||||
'frontendType': param.frontendType.value,
|
'frontendType': param.frontendType.value,
|
||||||
'frontendOptions': param.frontendOptions,
|
'frontendOptions': param.frontendOptions,
|
||||||
|
'uiHint': param.uiHint,
|
||||||
'validation': param.validation
|
'validation': param.validation
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
|
|
@ -230,8 +231,18 @@ class MethodBase:
|
||||||
return validated
|
return validated
|
||||||
|
|
||||||
def _validateType(self, value: Any, expectedType: str) -> Any:
|
def _validateType(self, value: Any, expectedType: str) -> Any:
|
||||||
"""Validate and convert value to expected type"""
|
"""Validate and convert value to expected type.
|
||||||
# Type validation logic
|
|
||||||
|
Catalog types (e.g. 'ConnectionRef', 'FeatureInstanceRef',
|
||||||
|
'DocumentList', 'TrusteeProcessResult') pass through unchanged —
|
||||||
|
runtime structural validation is handled by the workflow engine /
|
||||||
|
port-schema layer, not at the action-call boundary.
|
||||||
|
"""
|
||||||
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
|
||||||
|
|
||||||
|
if expectedType in PORT_TYPE_CATALOG:
|
||||||
|
return value
|
||||||
|
|
||||||
typeMap = {
|
typeMap = {
|
||||||
'str': str,
|
'str': str,
|
||||||
'int': int,
|
'int': int,
|
||||||
|
|
@ -239,8 +250,12 @@ class MethodBase:
|
||||||
'bool': bool,
|
'bool': bool,
|
||||||
'list': list,
|
'list': list,
|
||||||
'dict': dict,
|
'dict': dict,
|
||||||
|
'Any': lambda v: v,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if expectedType == 'Any':
|
||||||
|
return value
|
||||||
|
|
||||||
# Handle List[str], List[int], etc.
|
# Handle List[str], List[int], etc.
|
||||||
if expectedType.startswith('List['):
|
if expectedType.startswith('List['):
|
||||||
if isinstance(value, str):
|
if isinstance(value, str):
|
||||||
|
|
|
||||||
|
|
@ -25,17 +25,19 @@ class MethodChatbot(MethodBase):
|
||||||
actionId="chatbot.queryDatabase",
|
actionId="chatbot.queryDatabase",
|
||||||
description="Execute a SQL SELECT query via the preprocessor connector. Returns formatted query results.",
|
description="Execute a SQL SELECT query via the preprocessor connector. Returns formatted query results.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="QueryResult",
|
||||||
parameters={
|
parameters={
|
||||||
"sqlQuery": WorkflowActionParameter(
|
"sqlQuery": WorkflowActionParameter(
|
||||||
name="sqlQuery",
|
name="sqlQuery",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=False,
|
required=False,
|
||||||
description="SQL SELECT query to execute. If not provided, will attempt to extract from analysis_result document in documentList."
|
description="SQL SELECT query to execute. If not provided, will attempt to extract from analysis_result document in documentList."
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document reference(s) containing analysis_result with sqlQuery field. Used if sqlQuery parameter is not provided."
|
description="Document reference(s) containing analysis_result with sqlQuery field. Used if sqlQuery parameter is not provided."
|
||||||
|
|
|
||||||
|
|
@ -34,10 +34,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.listTasks",
|
actionId="clickup.listTasks",
|
||||||
description="List tasks in a ClickUp list (virtual path /team/{id}/list/{id})",
|
description="List tasks in a ClickUp list (virtual path /team/{id}/list/{id})",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -72,10 +73,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.listFields",
|
actionId="clickup.listFields",
|
||||||
description="List custom and built-in field definitions for a ClickUp list (names, types, ids)",
|
description="List custom and built-in field definitions for a ClickUp list (names, types, ids)",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -101,10 +103,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.searchTasks",
|
actionId="clickup.searchTasks",
|
||||||
description="Search tasks in a ClickUp workspace (team)",
|
description="Search tasks in a ClickUp workspace (team)",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -172,10 +175,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.getTask",
|
actionId="clickup.getTask",
|
||||||
description="Get a single task by ID",
|
description="Get a single task by ID",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -201,10 +205,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.createTask",
|
actionId="clickup.createTask",
|
||||||
description="Create a task in a list",
|
description="Create a task in a list",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -300,10 +305,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.updateTask",
|
actionId="clickup.updateTask",
|
||||||
description="Update a task (JSON body per ClickUp API)",
|
description="Update a task (JSON body per ClickUp API)",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
@ -336,10 +342,11 @@ class MethodClickup(MethodBase):
|
||||||
actionId="clickup.uploadAttachment",
|
actionId="clickup.uploadAttachment",
|
||||||
description="Upload a file attachment to a task",
|
description="Upload a file attachment to a task",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TaskAttachmentRef",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="ClickUp connection",
|
description="ClickUp connection",
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ class MethodContext(MethodBase):
|
||||||
actionId="context.getDocumentIndex",
|
actionId="context.getDocumentIndex",
|
||||||
description="Generate a comprehensive index of all documents available in the current workflow",
|
description="Generate a comprehensive index of all documents available in the current workflow",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"resultType": WorkflowActionParameter(
|
"resultType": WorkflowActionParameter(
|
||||||
name="resultType",
|
name="resultType",
|
||||||
|
|
@ -53,17 +54,18 @@ class MethodContext(MethodBase):
|
||||||
actionId="context.extractContent",
|
actionId="context.extractContent",
|
||||||
description="Extract raw content parts from documents without AI processing. Returns ContentParts with different typeGroups (text, image, table, structure, container). Images are returned as base64 data, not as extracted text. Text content is extracted from text-based formats (PDF text layers, Word docs, etc.) but NOT from images (no OCR). Use this action to prepare documents for subsequent AI processing actions.",
|
description="Extract raw content parts from documents without AI processing. Returns ContentParts with different typeGroups (text, image, table, structure, container). Images are returned as base64 data, not as extracted text. Text content is extracted from text-based formats (PDF text layers, Word docs, etc.) but NOT from images (no OCR). Use this action to prepare documents for subsequent AI processing actions.",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="UdmDocument",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to extract content from"
|
description="Document reference(s) to extract content from"
|
||||||
),
|
),
|
||||||
"extractionOptions": WorkflowActionParameter(
|
"extractionOptions": WorkflowActionParameter(
|
||||||
name="extractionOptions",
|
name="extractionOptions",
|
||||||
type="dict",
|
type="Dict[str,Any]",
|
||||||
frontendType=FrontendType.JSON,
|
frontendType=FrontendType.JSON,
|
||||||
required=False,
|
required=False,
|
||||||
description="Extraction options (if not provided, defaults are used). Note: This action does NOT use AI - it performs pure content extraction. Images are preserved as base64 data, not converted to text."
|
description="Extraction options (if not provided, defaults are used). Note: This action does NOT use AI - it performs pure content extraction. Images are preserved as base64 data, not converted to text."
|
||||||
|
|
@ -74,10 +76,11 @@ class MethodContext(MethodBase):
|
||||||
"neutralizeData": WorkflowActionDefinition(
|
"neutralizeData": WorkflowActionDefinition(
|
||||||
actionId="context.neutralizeData",
|
actionId="context.neutralizeData",
|
||||||
description="Neutralize extracted data from ContentExtracted documents (for use after extractContent)",
|
description="Neutralize extracted data from ContentExtracted documents (for use after extractContent)",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) containing ContentExtracted objects to neutralize"
|
description="Document reference(s) containing ContentExtracted objects to neutralize"
|
||||||
|
|
@ -88,6 +91,7 @@ class MethodContext(MethodBase):
|
||||||
"triggerPreprocessingServer": WorkflowActionDefinition(
|
"triggerPreprocessingServer": WorkflowActionDefinition(
|
||||||
actionId="context.triggerPreprocessingServer",
|
actionId="context.triggerPreprocessingServer",
|
||||||
description="Trigger preprocessing server at customer tenant to update database with configuration",
|
description="Trigger preprocessing server at customer tenant to update database with configuration",
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"endpoint": WorkflowActionParameter(
|
"endpoint": WorkflowActionParameter(
|
||||||
name="endpoint",
|
name="endpoint",
|
||||||
|
|
|
||||||
|
|
@ -24,10 +24,11 @@ class MethodFile(MethodBase):
|
||||||
actionId="file.create",
|
actionId="file.create",
|
||||||
description="Create a file from context (text/markdown from AI). Configurable format and style preset.",
|
description="Create a file from context (text/markdown from AI). Configurable format and style preset.",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"contentSources": WorkflowActionParameter(
|
"contentSources": WorkflowActionParameter(
|
||||||
name="contentSources",
|
name="contentSources",
|
||||||
type="list",
|
type="List[str]",
|
||||||
frontendType=FrontendType.HIDDEN,
|
frontendType=FrontendType.HIDDEN,
|
||||||
required=False,
|
required=False,
|
||||||
description="Array of context refs. Resolved and concatenated. Empty = from connected node.",
|
description="Array of context refs. Resolved and concatenated. Empty = from connected node.",
|
||||||
|
|
|
||||||
|
|
@ -42,6 +42,7 @@ class MethodJira(MethodBase):
|
||||||
"connectJira": WorkflowActionDefinition(
|
"connectJira": WorkflowActionDefinition(
|
||||||
actionId="jira.connectJira",
|
actionId="jira.connectJira",
|
||||||
description="Connect to JIRA instance and create ticket interface",
|
description="Connect to JIRA instance and create ticket interface",
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"apiUsername": WorkflowActionParameter(
|
"apiUsername": WorkflowActionParameter(
|
||||||
name="apiUsername",
|
name="apiUsername",
|
||||||
|
|
@ -81,6 +82,7 @@ class MethodJira(MethodBase):
|
||||||
"taskSyncDefinition": WorkflowActionParameter(
|
"taskSyncDefinition": WorkflowActionParameter(
|
||||||
name="taskSyncDefinition",
|
name="taskSyncDefinition",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=True,
|
required=True,
|
||||||
description="Field mapping definition as JSON string or dict"
|
description="Field mapping definition as JSON string or dict"
|
||||||
|
|
@ -91,6 +93,7 @@ class MethodJira(MethodBase):
|
||||||
"exportTicketsAsJson": WorkflowActionDefinition(
|
"exportTicketsAsJson": WorkflowActionDefinition(
|
||||||
actionId="jira.exportTicketsAsJson",
|
actionId="jira.exportTicketsAsJson",
|
||||||
description="Export tickets from JIRA as JSON list",
|
description="Export tickets from JIRA as JSON list",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionId": WorkflowActionParameter(
|
"connectionId": WorkflowActionParameter(
|
||||||
name="connectionId",
|
name="connectionId",
|
||||||
|
|
@ -112,6 +115,7 @@ class MethodJira(MethodBase):
|
||||||
"importTicketsFromJson": WorkflowActionDefinition(
|
"importTicketsFromJson": WorkflowActionDefinition(
|
||||||
actionId="jira.importTicketsFromJson",
|
actionId="jira.importTicketsFromJson",
|
||||||
description="Import ticket data from JSON back to JIRA",
|
description="Import ticket data from JSON back to JIRA",
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionId": WorkflowActionParameter(
|
"connectionId": WorkflowActionParameter(
|
||||||
name="connectionId",
|
name="connectionId",
|
||||||
|
|
@ -122,7 +126,7 @@ class MethodJira(MethodBase):
|
||||||
),
|
),
|
||||||
"ticketData": WorkflowActionParameter(
|
"ticketData": WorkflowActionParameter(
|
||||||
name="ticketData",
|
name="ticketData",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing ticket data as JSON"
|
description="Document reference containing ticket data as JSON"
|
||||||
|
|
@ -140,17 +144,18 @@ class MethodJira(MethodBase):
|
||||||
"mergeTicketData": WorkflowActionDefinition(
|
"mergeTicketData": WorkflowActionDefinition(
|
||||||
actionId="jira.mergeTicketData",
|
actionId="jira.mergeTicketData",
|
||||||
description="Merge JIRA export data with existing SharePoint data",
|
description="Merge JIRA export data with existing SharePoint data",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"jiraData": WorkflowActionParameter(
|
"jiraData": WorkflowActionParameter(
|
||||||
name="jiraData",
|
name="jiraData",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing JIRA ticket data as JSON array"
|
description="Document reference containing JIRA ticket data as JSON array"
|
||||||
),
|
),
|
||||||
"existingData": WorkflowActionParameter(
|
"existingData": WorkflowActionParameter(
|
||||||
name="existingData",
|
name="existingData",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing existing SharePoint data as JSON array"
|
description="Document reference containing existing SharePoint data as JSON array"
|
||||||
|
|
@ -176,10 +181,11 @@ class MethodJira(MethodBase):
|
||||||
"parseCsvContent": WorkflowActionDefinition(
|
"parseCsvContent": WorkflowActionDefinition(
|
||||||
actionId="jira.parseCsvContent",
|
actionId="jira.parseCsvContent",
|
||||||
description="Parse CSV content with custom headers",
|
description="Parse CSV content with custom headers",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"csvContent": WorkflowActionParameter(
|
"csvContent": WorkflowActionParameter(
|
||||||
name="csvContent",
|
name="csvContent",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing CSV file content as bytes"
|
description="Document reference containing CSV file content as bytes"
|
||||||
|
|
@ -207,10 +213,11 @@ class MethodJira(MethodBase):
|
||||||
"parseExcelContent": WorkflowActionDefinition(
|
"parseExcelContent": WorkflowActionDefinition(
|
||||||
actionId="jira.parseExcelContent",
|
actionId="jira.parseExcelContent",
|
||||||
description="Parse Excel content with custom headers",
|
description="Parse Excel content with custom headers",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"excelContent": WorkflowActionParameter(
|
"excelContent": WorkflowActionParameter(
|
||||||
name="excelContent",
|
name="excelContent",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing Excel file content as bytes"
|
description="Document reference containing Excel file content as bytes"
|
||||||
|
|
@ -238,17 +245,18 @@ class MethodJira(MethodBase):
|
||||||
"createCsvContent": WorkflowActionDefinition(
|
"createCsvContent": WorkflowActionDefinition(
|
||||||
actionId="jira.createCsvContent",
|
actionId="jira.createCsvContent",
|
||||||
description="Create CSV content with custom headers",
|
description="Create CSV content with custom headers",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"data": WorkflowActionParameter(
|
"data": WorkflowActionParameter(
|
||||||
name="data",
|
name="data",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing data as JSON (with data field from mergeTicketData)"
|
description="Document reference containing data as JSON (with data field from mergeTicketData)"
|
||||||
),
|
),
|
||||||
"headers": WorkflowActionParameter(
|
"headers": WorkflowActionParameter(
|
||||||
name="headers",
|
name="headers",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document reference containing headers JSON (from parseCsvContent/parseExcelContent)"
|
description="Document reference containing headers JSON (from parseCsvContent/parseExcelContent)"
|
||||||
|
|
@ -273,17 +281,18 @@ class MethodJira(MethodBase):
|
||||||
"createExcelContent": WorkflowActionDefinition(
|
"createExcelContent": WorkflowActionDefinition(
|
||||||
actionId="jira.createExcelContent",
|
actionId="jira.createExcelContent",
|
||||||
description="Create Excel content with custom headers",
|
description="Create Excel content with custom headers",
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"data": WorkflowActionParameter(
|
"data": WorkflowActionParameter(
|
||||||
name="data",
|
name="data",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference containing data as JSON (with data field from mergeTicketData)"
|
description="Document reference containing data as JSON (with data field from mergeTicketData)"
|
||||||
),
|
),
|
||||||
"headers": WorkflowActionParameter(
|
"headers": WorkflowActionParameter(
|
||||||
name="headers",
|
name="headers",
|
||||||
type="str",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document reference containing headers JSON (from parseExcelContent)"
|
description="Document reference containing headers JSON (from parseExcelContent)"
|
||||||
|
|
|
||||||
|
|
@ -40,10 +40,11 @@ class MethodOutlook(MethodBase):
|
||||||
actionId="outlook.readEmails",
|
actionId="outlook.readEmails",
|
||||||
description="Read emails and metadata from a mailbox folder",
|
description="Read emails and metadata from a mailbox folder",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="EmailList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -89,10 +90,11 @@ class MethodOutlook(MethodBase):
|
||||||
actionId="outlook.searchEmails",
|
actionId="outlook.searchEmails",
|
||||||
description="Search emails by query and return matching items with metadata",
|
description="Search emails by query and return matching items with metadata",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="EmailList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -138,10 +140,11 @@ class MethodOutlook(MethodBase):
|
||||||
actionId="outlook.composeAndDraftEmailWithContext",
|
actionId="outlook.composeAndDraftEmailWithContext",
|
||||||
description="Compose email content using AI from context and optional documents, then create a draft",
|
description="Compose email content using AI from context and optional documents, then create a draft",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="EmailDraft",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -156,20 +159,21 @@ class MethodOutlook(MethodBase):
|
||||||
"context": WorkflowActionParameter(
|
"context": WorkflowActionParameter(
|
||||||
name="context",
|
name="context",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=False,
|
required=False,
|
||||||
description="Detailed context for AI composition (omit when emailContent provided)"
|
description="Detailed context for AI composition (omit when emailContent provided)"
|
||||||
),
|
),
|
||||||
"emailContent": WorkflowActionParameter(
|
"emailContent": WorkflowActionParameter(
|
||||||
name="emailContent",
|
name="emailContent",
|
||||||
type="dict",
|
type="Dict[str,Any]",
|
||||||
frontendType=FrontendType.HIDDEN,
|
frontendType=FrontendType.HIDDEN,
|
||||||
required=False,
|
required=False,
|
||||||
description="Direct subject/body/to from upstream (skips AI composition)"
|
description="Direct subject/body/to from upstream (skips AI composition)"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[Any]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document references or inline ActionDocuments for attachments"
|
description="Document references or inline ActionDocuments for attachments"
|
||||||
|
|
@ -213,17 +217,18 @@ class MethodOutlook(MethodBase):
|
||||||
actionId="outlook.sendDraftEmail",
|
actionId="outlook.sendDraftEmail",
|
||||||
description="Send draft email(s) using draft email JSON document(s) from action outlook.composeAndDraftEmailWithContext",
|
description="Send draft email(s) using draft email JSON document(s) from action outlook.composeAndDraftEmailWithContext",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to draft emails in JSON format (outputs from outlook.composeAndDraftEmailWithContext function)"
|
description="Document reference(s) to draft emails in JSON format (outputs from outlook.composeAndDraftEmailWithContext function)"
|
||||||
|
|
|
||||||
|
|
@ -43,10 +43,11 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.readTicket",
|
actionId="redmine.readTicket",
|
||||||
description="Read a single Redmine ticket from the local mirror by ticketId.",
|
description="Read a single Redmine ticket from the local mirror by ticketId.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="RedmineTicket",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"ticketId": WorkflowActionParameter(
|
"ticketId": WorkflowActionParameter(
|
||||||
name="ticketId", type="int", frontendType=FrontendType.TEXT,
|
name="ticketId", type="int", frontendType=FrontendType.TEXT,
|
||||||
|
|
@ -59,13 +60,14 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.listTickets",
|
actionId="redmine.listTickets",
|
||||||
description="List tickets from the mirror with optional filters (tracker, status, period, assignee).",
|
description="List tickets from the mirror with optional filters (tracker, status, period, assignee).",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="RedmineTicketList",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"trackerIds": WorkflowActionParameter(
|
"trackerIds": WorkflowActionParameter(
|
||||||
name="trackerIds", type="list", frontendType=FrontendType.JSON,
|
name="trackerIds", type="List[int]", frontendType=FrontendType.JSON,
|
||||||
required=False, description="Restrict to these tracker ids (list of int or comma-separated string).",
|
required=False, description="Restrict to these tracker ids (list of int or comma-separated string).",
|
||||||
),
|
),
|
||||||
"status": WorkflowActionParameter(
|
"status": WorkflowActionParameter(
|
||||||
|
|
@ -95,10 +97,11 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.createTicket",
|
actionId="redmine.createTicket",
|
||||||
description="Create a new Redmine ticket. Requires subject and trackerId.",
|
description="Create a new Redmine ticket. Requires subject and trackerId.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="RedmineTicket",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"subject": WorkflowActionParameter(
|
"subject": WorkflowActionParameter(
|
||||||
name="subject", type="str", frontendType=FrontendType.TEXT,
|
name="subject", type="str", frontendType=FrontendType.TEXT,
|
||||||
|
|
@ -109,7 +112,7 @@ class MethodRedmine(MethodBase):
|
||||||
required=True, description="Tracker id (Userstory, Feature, Task ...).",
|
required=True, description="Tracker id (Userstory, Feature, Task ...).",
|
||||||
),
|
),
|
||||||
"description": WorkflowActionParameter(
|
"description": WorkflowActionParameter(
|
||||||
name="description", type="str", frontendType=FrontendType.TEXTAREA,
|
name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
|
||||||
required=False, description="Markdown/Textile description body.",
|
required=False, description="Markdown/Textile description body.",
|
||||||
),
|
),
|
||||||
"statusId": WorkflowActionParameter(
|
"statusId": WorkflowActionParameter(
|
||||||
|
|
@ -133,7 +136,7 @@ class MethodRedmine(MethodBase):
|
||||||
required=False, description="Target/fixed version id.",
|
required=False, description="Target/fixed version id.",
|
||||||
),
|
),
|
||||||
"customFields": WorkflowActionParameter(
|
"customFields": WorkflowActionParameter(
|
||||||
name="customFields", type="dict", frontendType=FrontendType.JSON,
|
name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON,
|
||||||
required=False, description="Custom fields as {customFieldId: value}.",
|
required=False, description="Custom fields as {customFieldId: value}.",
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
@ -143,10 +146,11 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.updateTicket",
|
actionId="redmine.updateTicket",
|
||||||
description="Update a Redmine ticket. Only provided fields are sent.",
|
description="Update a Redmine ticket. Only provided fields are sent.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="RedmineTicket",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"ticketId": WorkflowActionParameter(
|
"ticketId": WorkflowActionParameter(
|
||||||
name="ticketId", type="int", frontendType=FrontendType.TEXT,
|
name="ticketId", type="int", frontendType=FrontendType.TEXT,
|
||||||
|
|
@ -157,7 +161,7 @@ class MethodRedmine(MethodBase):
|
||||||
required=False, description="New title.",
|
required=False, description="New title.",
|
||||||
),
|
),
|
||||||
"description": WorkflowActionParameter(
|
"description": WorkflowActionParameter(
|
||||||
name="description", type="str", frontendType=FrontendType.TEXTAREA,
|
name="description", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
|
||||||
required=False, description="New description.",
|
required=False, description="New description.",
|
||||||
),
|
),
|
||||||
"trackerId": WorkflowActionParameter(
|
"trackerId": WorkflowActionParameter(
|
||||||
|
|
@ -185,11 +189,11 @@ class MethodRedmine(MethodBase):
|
||||||
required=False, description="Change fixed version.",
|
required=False, description="Change fixed version.",
|
||||||
),
|
),
|
||||||
"notes": WorkflowActionParameter(
|
"notes": WorkflowActionParameter(
|
||||||
name="notes", type="str", frontendType=FrontendType.TEXTAREA,
|
name="notes", type="str", uiHint="textarea", frontendType=FrontendType.TEXTAREA,
|
||||||
required=False, description="Journal entry (comment) added to the ticket.",
|
required=False, description="Journal entry (comment) added to the ticket.",
|
||||||
),
|
),
|
||||||
"customFields": WorkflowActionParameter(
|
"customFields": WorkflowActionParameter(
|
||||||
name="customFields", type="dict", frontendType=FrontendType.JSON,
|
name="customFields", type="Dict[str,Any]", frontendType=FrontendType.JSON,
|
||||||
required=False, description="Custom fields as {customFieldId: value}.",
|
required=False, description="Custom fields as {customFieldId: value}.",
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
@ -199,10 +203,11 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.getStats",
|
actionId="redmine.getStats",
|
||||||
description="Aggregated stats (KPIs, throughput, status distribution, backlog) from the mirror.",
|
description="Aggregated stats (KPIs, throughput, status distribution, backlog) from the mirror.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="RedmineStats",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"dateFrom": WorkflowActionParameter(
|
"dateFrom": WorkflowActionParameter(
|
||||||
name="dateFrom", type="str", frontendType=FrontendType.TEXT,
|
name="dateFrom", type="str", frontendType=FrontendType.TEXT,
|
||||||
|
|
@ -217,7 +222,7 @@ class MethodRedmine(MethodBase):
|
||||||
required=False, description="'day' | 'week' | 'month' (default 'week').",
|
required=False, description="'day' | 'week' | 'month' (default 'week').",
|
||||||
),
|
),
|
||||||
"trackerIds": WorkflowActionParameter(
|
"trackerIds": WorkflowActionParameter(
|
||||||
name="trackerIds", type="list", frontendType=FrontendType.JSON,
|
name="trackerIds", type="List[int]", frontendType=FrontendType.JSON,
|
||||||
required=False, description="Restrict to these tracker ids.",
|
required=False, description="Restrict to these tracker ids.",
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
@ -227,10 +232,11 @@ class MethodRedmine(MethodBase):
|
||||||
actionId="redmine.runSync",
|
actionId="redmine.runSync",
|
||||||
description="Sync Redmine tickets and relations into the local mirror (incremental by default).",
|
description="Sync Redmine tickets and relations into the local mirror (incremental by default).",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId", type="str", frontendType=FrontendType.TEXT,
|
name="featureInstanceId", type="FeatureInstanceRef", frontendType=FrontendType.TEXT,
|
||||||
required=True, description="Redmine feature instance ID",
|
required=True, description="Redmine feature instance",
|
||||||
),
|
),
|
||||||
"force": WorkflowActionParameter(
|
"force": WorkflowActionParameter(
|
||||||
name="force", type="bool", frontendType=FrontendType.CHECKBOX,
|
name="force", type="bool", frontendType=FrontendType.CHECKBOX,
|
||||||
|
|
|
||||||
|
|
@ -51,10 +51,11 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.findDocumentPath",
|
actionId="sharepoint.findDocumentPath",
|
||||||
description="Find documents and folders by name/path across sites",
|
description="Find documents and folders by name/path across sites",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -89,17 +90,18 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.readDocuments",
|
actionId="sharepoint.readDocuments",
|
||||||
description="Read documents from SharePoint and extract content/metadata",
|
description="Read documents from SharePoint and extract content/metadata",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document list reference(s) containing findDocumentPath result"
|
description="Document list reference(s) containing findDocumentPath result"
|
||||||
|
|
@ -126,17 +128,18 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.uploadDocument",
|
actionId="sharepoint.uploadDocument",
|
||||||
description="Upload documents to SharePoint",
|
description="Upload documents to SharePoint",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document reference(s) to upload. File names are taken from the documents"
|
description="Document reference(s) to upload. File names are taken from the documents"
|
||||||
|
|
@ -155,17 +158,18 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.listDocuments",
|
actionId="sharepoint.listDocuments",
|
||||||
description="List documents and folders in SharePoint paths across sites",
|
description="List documents and folders in SharePoint paths across sites",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="FileList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=False,
|
required=False,
|
||||||
description="Document list reference(s) containing findDocumentPath result"
|
description="Document list reference(s) containing findDocumentPath result"
|
||||||
|
|
@ -192,17 +196,18 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.analyzeFolderUsage",
|
actionId="sharepoint.analyzeFolderUsage",
|
||||||
description="Analyze usage intensity of folders and files in SharePoint",
|
description="Analyze usage intensity of folders and files in SharePoint",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
),
|
),
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="List[str]",
|
type="DocumentList",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Document list reference(s) containing findDocumentPath result"
|
description="Document list reference(s) containing findDocumentPath result"
|
||||||
|
|
@ -237,10 +242,11 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.findSiteByUrl",
|
actionId="sharepoint.findSiteByUrl",
|
||||||
description="Find SharePoint site by hostname and site path",
|
description="Find SharePoint site by hostname and site path",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -266,10 +272,11 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.downloadFileByPath",
|
actionId="sharepoint.downloadFileByPath",
|
||||||
description="Download file from SharePoint by exact file path",
|
description="Download file from SharePoint by exact file path",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="DocumentList",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -302,10 +309,11 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.copyFile",
|
actionId="sharepoint.copyFile",
|
||||||
description="Copy file within SharePoint",
|
description="Copy file within SharePoint",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
@ -331,10 +339,11 @@ class MethodSharepoint(MethodBase):
|
||||||
actionId="sharepoint.uploadFile",
|
actionId="sharepoint.uploadFile",
|
||||||
description="Upload raw file content (bytes) to SharePoint",
|
description="Upload raw file content (bytes) to SharePoint",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=True,
|
required=True,
|
||||||
description="Microsoft connection label"
|
description="Microsoft connection label"
|
||||||
|
|
|
||||||
|
|
@ -2,10 +2,15 @@
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
"""
|
"""
|
||||||
Process extracted documents: create TrusteeDocument + TrusteePosition from extraction JSON.
|
Process extracted documents: create TrusteeDocument + TrusteePosition from extraction JSON.
|
||||||
Input: documentList (reference to extractFromFiles result).
|
|
||||||
Each document is JSON with documentType, extractedData, fileId, fileName.
|
Input: documentList (DataRef on upstream DocumentList.documents — typically
|
||||||
extractedData is a list of expense/position records.
|
trustee.extractFromFiles[documents]). Each item is an ActionDocument-dump dict
|
||||||
Output: one ActionDocument with JSON { positionIds, documentIds } for chaining to syncToAccounting.
|
with `documentData` (JSON string) carrying { documentType, extractedData, fileId,
|
||||||
|
fileName }. extractedData is a list of expense/position records.
|
||||||
|
|
||||||
|
Output: ActionResult with one ActionDocument containing JSON
|
||||||
|
{ positionIds, documentIds, autoMatchedPositionIds } for chaining to
|
||||||
|
syncToAccounting (via DataRef on documents[0]).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
|
|
|
||||||
|
|
@ -2,8 +2,10 @@
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
"""
|
"""
|
||||||
Sync trustee positions to accounting (Buha).
|
Sync trustee positions to accounting (Buha).
|
||||||
Input: featureInstanceId, documentList (reference to processDocuments result message).
|
|
||||||
Reads positionIds from the document and calls AccountingBridge.pushBatchToAccounting.
|
Input: featureInstanceId, documentList (DataRef on processDocuments[documents] —
|
||||||
|
list with one ActionDocument carrying JSON { positionIds, documentIds, ... }).
|
||||||
|
Reads positionIds from the first document and calls AccountingBridge.pushBatchToAccounting.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
|
|
|
||||||
|
|
@ -31,17 +31,21 @@ class MethodTrustee(MethodBase):
|
||||||
actionId="trustee.extractFromFiles",
|
actionId="trustee.extractFromFiles",
|
||||||
description="Extract document type and data from PDF/JPG (fileIds or SharePoint folder)",
|
description="Extract document type and data from PDF/JPG (fileIds or SharePoint folder)",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
# Runtime returns ActionResult.isSuccess(documents=[...]); see
|
||||||
|
# actions/extractFromFiles.py. Keep this in sync with the
|
||||||
|
# graphical-editor adapter (nodeDefinitions/trustee.py).
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"fileIds": WorkflowActionParameter(
|
"fileIds": WorkflowActionParameter(
|
||||||
name="fileIds",
|
name="fileIds",
|
||||||
type="list",
|
type="List[str]",
|
||||||
frontendType=FrontendType.JSON,
|
frontendType=FrontendType.JSON,
|
||||||
required=False,
|
required=False,
|
||||||
description="List of file IDs already in DB (alternative to connectionReference + sharepointFolder)",
|
description="List of file IDs already in DB (alternative to connectionReference + sharepointFolder)",
|
||||||
),
|
),
|
||||||
"connectionReference": WorkflowActionParameter(
|
"connectionReference": WorkflowActionParameter(
|
||||||
name="connectionReference",
|
name="connectionReference",
|
||||||
type="str",
|
type="ConnectionRef",
|
||||||
frontendType=FrontendType.USER_CONNECTION,
|
frontendType=FrontendType.USER_CONNECTION,
|
||||||
required=False,
|
required=False,
|
||||||
description="Microsoft connection for SharePoint (use with sharepointFolder)",
|
description="Microsoft connection for SharePoint (use with sharepointFolder)",
|
||||||
|
|
@ -55,14 +59,15 @@ class MethodTrustee(MethodBase):
|
||||||
),
|
),
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId",
|
name="featureInstanceId",
|
||||||
type="str",
|
type="FeatureInstanceRef",
|
||||||
frontendType=FrontendType.TEXT,
|
frontendType=FrontendType.TEXT,
|
||||||
required=True,
|
required=True,
|
||||||
description="Trustee feature instance ID",
|
description="Trustee feature instance",
|
||||||
),
|
),
|
||||||
"prompt": WorkflowActionParameter(
|
"prompt": WorkflowActionParameter(
|
||||||
name="prompt",
|
name="prompt",
|
||||||
type="str",
|
type="str",
|
||||||
|
uiHint="textarea",
|
||||||
frontendType=FrontendType.TEXTAREA,
|
frontendType=FrontendType.TEXTAREA,
|
||||||
required=False,
|
required=False,
|
||||||
description="AI prompt for extraction (optional)",
|
description="AI prompt for extraction (optional)",
|
||||||
|
|
@ -74,20 +79,24 @@ class MethodTrustee(MethodBase):
|
||||||
actionId="trustee.processDocuments",
|
actionId="trustee.processDocuments",
|
||||||
description="Create TrusteeDocument + TrusteePosition from extraction result (documentList from previous action)",
|
description="Create TrusteeDocument + TrusteePosition from extraction result (documentList from previous action)",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
# Runtime returns ActionResult.isSuccess(documents=[...]).
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="list",
|
# Concrete shape consumed by _resolveDocumentList (list
|
||||||
|
# of dicts with documentName/documentData/mimeType).
|
||||||
|
type="List[ActionDocument]",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Reference to extractFromFiles result (e.g. docList:messageId:extract_result)",
|
description="DataRef to upstream documents (e.g. trustee.extractFromFiles → documents)",
|
||||||
),
|
),
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId",
|
name="featureInstanceId",
|
||||||
type="str",
|
type="FeatureInstanceRef",
|
||||||
frontendType=FrontendType.TEXT,
|
frontendType=FrontendType.TEXT,
|
||||||
required=True,
|
required=True,
|
||||||
description="Trustee feature instance ID",
|
description="Trustee feature instance",
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
execute=processDocuments.__get__(self, self.__class__),
|
execute=processDocuments.__get__(self, self.__class__),
|
||||||
|
|
@ -96,20 +105,24 @@ class MethodTrustee(MethodBase):
|
||||||
actionId="trustee.syncToAccounting",
|
actionId="trustee.syncToAccounting",
|
||||||
description="Push trustee positions to accounting (documentList = processDocuments result)",
|
description="Push trustee positions to accounting (documentList = processDocuments result)",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
# Runtime returns ActionResult.isSuccess(documents=[...]).
|
||||||
|
outputType="ActionResult",
|
||||||
parameters={
|
parameters={
|
||||||
"documentList": WorkflowActionParameter(
|
"documentList": WorkflowActionParameter(
|
||||||
name="documentList",
|
name="documentList",
|
||||||
type="list",
|
# Concrete shape consumed by syncToAccounting._resolveDocumentList:
|
||||||
|
# list of ActionDocument dicts produced by processDocuments.
|
||||||
|
type="List[ActionDocument]",
|
||||||
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
frontendType=FrontendType.DOCUMENT_REFERENCE,
|
||||||
required=True,
|
required=True,
|
||||||
description="Reference to processDocuments result message",
|
description="DataRef to upstream documents (e.g. trustee.processDocuments → documents)",
|
||||||
),
|
),
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId",
|
name="featureInstanceId",
|
||||||
type="str",
|
type="FeatureInstanceRef",
|
||||||
frontendType=FrontendType.TEXT,
|
frontendType=FrontendType.TEXT,
|
||||||
required=True,
|
required=True,
|
||||||
description="Trustee feature instance ID",
|
description="Trustee feature instance",
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
execute=syncToAccounting.__get__(self, self.__class__),
|
execute=syncToAccounting.__get__(self, self.__class__),
|
||||||
|
|
@ -118,13 +131,14 @@ class MethodTrustee(MethodBase):
|
||||||
actionId="trustee.refreshAccountingData",
|
actionId="trustee.refreshAccountingData",
|
||||||
description="Import/refresh accounting data from external system (e.g. Abacus) into local tables. Checks cache freshness; use forceRefresh to re-import.",
|
description="Import/refresh accounting data from external system (e.g. Abacus) into local tables. Checks cache freshness; use forceRefresh to re-import.",
|
||||||
dynamicMode=True,
|
dynamicMode=True,
|
||||||
|
outputType="TrusteeRefreshResult",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId",
|
name="featureInstanceId",
|
||||||
type="str",
|
type="FeatureInstanceRef",
|
||||||
frontendType=FrontendType.TEXT,
|
frontendType=FrontendType.TEXT,
|
||||||
required=True,
|
required=True,
|
||||||
description="Trustee feature instance ID",
|
description="Trustee feature instance",
|
||||||
),
|
),
|
||||||
"forceRefresh": WorkflowActionParameter(
|
"forceRefresh": WorkflowActionParameter(
|
||||||
name="forceRefresh",
|
name="forceRefresh",
|
||||||
|
|
@ -154,13 +168,14 @@ class MethodTrustee(MethodBase):
|
||||||
actionId="trustee.queryData",
|
actionId="trustee.queryData",
|
||||||
description="Read data from the Trustee DB (lookup tenant+rent, raw recordset, or aggregate). Does NOT trigger an external sync.",
|
description="Read data from the Trustee DB (lookup tenant+rent, raw recordset, or aggregate). Does NOT trigger an external sync.",
|
||||||
dynamicMode=False,
|
dynamicMode=False,
|
||||||
|
outputType="QueryResult",
|
||||||
parameters={
|
parameters={
|
||||||
"featureInstanceId": WorkflowActionParameter(
|
"featureInstanceId": WorkflowActionParameter(
|
||||||
name="featureInstanceId",
|
name="featureInstanceId",
|
||||||
type="str",
|
type="FeatureInstanceRef",
|
||||||
frontendType=FrontendType.TEXT,
|
frontendType=FrontendType.TEXT,
|
||||||
required=True,
|
required=True,
|
||||||
description="Trustee feature instance ID",
|
description="Trustee feature instance",
|
||||||
),
|
),
|
||||||
"mode": WorkflowActionParameter(
|
"mode": WorkflowActionParameter(
|
||||||
name="mode",
|
name="mode",
|
||||||
|
|
|
||||||
25
scripts/_listMandates.py
Normal file
25
scripts/_listMandates.py
Normal file
|
|
@ -0,0 +1,25 @@
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
|
||||||
|
import psycopg2, psycopg2.extras
|
||||||
|
from modules.shared.configuration import APP_CONFIG
|
||||||
|
|
||||||
|
c = psycopg2.connect(
|
||||||
|
host=APP_CONFIG.get('DB_HOST','localhost'),
|
||||||
|
user=APP_CONFIG.get('DB_USER'),
|
||||||
|
password=APP_CONFIG.get('DB_PASSWORD_SECRET'),
|
||||||
|
port=int(APP_CONFIG.get('DB_PORT',5432)),
|
||||||
|
dbname='poweron_app',
|
||||||
|
)
|
||||||
|
cur = c.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
|
||||||
|
cur.execute('SELECT id, name, label, enabled, "deletedAt", "sysCreatedAt" FROM "Mandate" ORDER BY "sysCreatedAt"')
|
||||||
|
print("All Mandates in poweron_app:")
|
||||||
|
for r in cur.fetchall():
|
||||||
|
print(f" id={r['id']} name={r['name']} label={r['label']} enabled={r['enabled']} deletedAt={r['deletedAt']}")
|
||||||
|
|
||||||
|
cur.execute('SELECT COUNT(*) AS n FROM "FeatureInstance" WHERE "featureCode" = %s', ("redmine",))
|
||||||
|
print(f"\nTotal redmine FeatureInstances in poweron_app: {cur.fetchone()['n']}")
|
||||||
|
|
||||||
|
cur.execute('SELECT id, "mandateId", label, enabled FROM "FeatureInstance" WHERE "featureCode" = %s ORDER BY "sysCreatedAt"', ("redmine",))
|
||||||
|
for r in cur.fetchall():
|
||||||
|
print(f" fi={r['id']} mandate={r['mandateId']} label={r['label']} enabled={r['enabled']}")
|
||||||
97
scripts/check_orphan_featureinstance.py
Normal file
97
scripts/check_orphan_featureinstance.py
Normal file
|
|
@ -0,0 +1,97 @@
|
||||||
|
"""Quick-Check: existiert FeatureInstance-Row 6019e7d0-b23d-41ec-b9f7-3dd1293078f2
|
||||||
|
in poweron_app, und welche Mandate/Instances stehen mit dem RedmineTicketMirror in Verbindung?
|
||||||
|
|
||||||
|
Aufruf: python gateway/scripts/check_orphan_featureinstance.py
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
_GATEWAY = Path(__file__).resolve().parents[1]
|
||||||
|
if str(_GATEWAY) not in sys.path:
|
||||||
|
sys.path.insert(0, str(_GATEWAY))
|
||||||
|
|
||||||
|
import psycopg2
|
||||||
|
import psycopg2.extras
|
||||||
|
|
||||||
|
from modules.shared.configuration import APP_CONFIG
|
||||||
|
|
||||||
|
|
||||||
|
_TARGET_FI = "6019e7d0-b23d-41ec-b9f7-3dd1293078f2"
|
||||||
|
_TARGET_MANDATE = "674b1bc0-1d01-4696-a094-3374c450f6e2"
|
||||||
|
|
||||||
|
|
||||||
|
def _connect(dbName: str):
    """Open a psycopg2 connection to *dbName* using the gateway's DB settings."""
    connectKwargs = {
        "host": APP_CONFIG.get("DB_HOST", "localhost"),
        "user": APP_CONFIG.get("DB_USER"),
        "password": APP_CONFIG.get("DB_PASSWORD_SECRET"),
        "port": int(APP_CONFIG.get("DB_PORT", 5432)),
        "dbname": dbName,
    }
    return psycopg2.connect(**connectKwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """Diagnose the suspected orphan FeatureInstance.

    Queries poweron_app for the target FeatureInstance/Mandate rows, then
    poweron_redmine for RedmineTicketMirror rows referencing them, and
    prints everything for manual inspection. Returns an exit code (always 0).
    """
    print(f"Checking FeatureInstance {_TARGET_FI} ...\n")

    # --- Application DB: FeatureInstance + Mandate side -------------------
    with _connect("poweron_app") as appConn:
        with appConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
            # Does the target FeatureInstance row exist at all?
            cur.execute(
                'SELECT id, "mandateId", "featureCode", label, enabled, '
                '"sysCreatedAt", "sysModifiedAt" '
                'FROM "FeatureInstance" WHERE id = %s',
                (_TARGET_FI,),
            )
            fi = cur.fetchone()
            print(f"FeatureInstance row in poweron_app: {fi}\n")

            # All redmine instances on the same mandate (siblings/duplicates).
            cur.execute(
                'SELECT id, "mandateId", "featureCode", label, enabled '
                'FROM "FeatureInstance" '
                'WHERE "mandateId" = %s AND "featureCode" = %s',
                (_TARGET_MANDATE, "redmine"),
            )
            sameMandateRedmine = cur.fetchall()
            print(
                f"All redmine FeatureInstances on mandate {_TARGET_MANDATE} "
                f"({len(sameMandateRedmine)}):"
            )
            for r in sameMandateRedmine:
                print(f" {r}")
            print()

            # The mandate itself (deleted/disabled state).
            cur.execute(
                'SELECT id, name, label, enabled, "deletedAt", '
                '"sysCreatedAt", "sysModifiedAt" '
                'FROM "Mandate" WHERE id = %s',
                (_TARGET_MANDATE,),
            )
            mandate = cur.fetchone()
            print(f"Mandate row: {mandate}\n")

    # --- Redmine DB: mirror rows referencing the instance -----------------
    with _connect("poweron_redmine") as rmConn:
        with rmConn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
            cur.execute(
                'SELECT COUNT(*) AS n '
                'FROM "RedmineTicketMirror" WHERE "featureInstanceId" = %s',
                (_TARGET_FI,),
            )
            n = cur.fetchone()["n"]
            print(f"RedmineTicketMirror rows with featureInstanceId={_TARGET_FI}: {n}")

            # Which (featureInstanceId, mandateId) pairs hold the mirror data?
            cur.execute(
                'SELECT DISTINCT "featureInstanceId", "mandateId", COUNT(*) AS n '
                'FROM "RedmineTicketMirror" '
                'GROUP BY "featureInstanceId", "mandateId" ORDER BY n DESC LIMIT 20'
            )
            distribution = cur.fetchall()
            print(f"\nRedmineTicketMirror distribution (top 20):")
            for r in distribution:
                print(f" fi={r['featureInstanceId']} mandate={r['mandateId']} count={r['n']}")

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||||
213
scripts/script_migrate_feature_instance_refs.py
Normal file
213
scripts/script_migrate_feature_instance_refs.py
Normal file
|
|
@ -0,0 +1,213 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Persistent DB migration: rewrite raw ``featureInstanceId`` UUIDs in stored
|
||||||
|
workflow graphs to typed ``FeatureInstanceRef`` envelopes.
|
||||||
|
|
||||||
|
Why
|
||||||
|
---
|
||||||
|
The runtime engine (``executeGraph``) already calls
|
||||||
|
``materializeFeatureInstanceRefs`` on every run, so legacy graphs *execute*
|
||||||
|
correctly today. The Editor however reads the persisted ``graph`` field
|
||||||
|
directly and shows whatever shape is on disk — until a workflow is saved
|
||||||
|
again it still displays the old plain-string format.
|
||||||
|
|
||||||
|
What this script does
|
||||||
|
---------------------
|
||||||
|
Walks every row of:
|
||||||
|
|
||||||
|
* ``poweron_graphicaleditor.Automation2Workflow`` (legacy ``graph`` column)
|
||||||
|
* ``poweron_graphicaleditor.AutoVersion`` (canonical ``graph`` column)
|
||||||
|
|
||||||
|
For each row, it:
|
||||||
|
|
||||||
|
1. Loads the JSONB ``graph`` column.
|
||||||
|
2. Applies :func:`materializeFeatureInstanceRefs`.
|
||||||
|
3. Persists the result if (and only if) it differs from the input.
|
||||||
|
|
||||||
|
Idempotent — re-runs are no-ops.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
::
|
||||||
|
|
||||||
|
python scripts/script_migrate_feature_instance_refs.py --dry-run
|
||||||
|
python scripts/script_migrate_feature_instance_refs.py
|
||||||
|
|
||||||
|
Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (Track C1).
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Iterable, List, Tuple
|
||||||
|
|
||||||
|
_scriptPath = Path(__file__).resolve()
|
||||||
|
_gatewayPath = _scriptPath.parent.parent
|
||||||
|
sys.path.insert(0, str(_gatewayPath))
|
||||||
|
os.chdir(str(_gatewayPath))
|
||||||
|
|
||||||
|
import psycopg2 # noqa: E402
|
||||||
|
from psycopg2.extras import Json, RealDictCursor # noqa: E402
|
||||||
|
|
||||||
|
from modules.shared.configuration import APP_CONFIG # noqa: E402
|
||||||
|
from modules.workflows.automation2.featureInstanceRefMigration import ( # noqa: E402
|
||||||
|
materializeFeatureInstanceRefs,
|
||||||
|
)
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
|
||||||
|
logger = logging.getLogger("script_migrate_feature_instance_refs")
|
||||||
|
|
||||||
|
|
||||||
|
_DB_NAME = "poweron_graphicaleditor"
|
||||||
|
_TABLES_AND_PK: List[Tuple[str, str]] = [
|
||||||
|
('"Automation2Workflow"', "id"),
|
||||||
|
('"AutoVersion"', "id"),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def _connect() -> "psycopg2.extensions.connection":
    """Open a psycopg2 connection to the graphicaleditor DB.

    Raises ``SystemExit`` with a readable message when DB credentials are
    missing from APP_CONFIG, instead of a psycopg2 traceback.
    """
    port = int(APP_CONFIG.get("DB_PORT", "5432"))
    user = APP_CONFIG.get("DB_USER")
    password = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
    if not user or not password:
        raise SystemExit("DB_USER and DB_PASSWORD/DB_PASSWORD_SECRET must be set")
    return psycopg2.connect(
        host=APP_CONFIG.get("DB_HOST", "localhost"),
        port=port,
        user=user,
        password=password,
        database=_DB_NAME,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _loadGraph(value: Any) -> Dict[str, Any]:
|
||||||
|
"""psycopg2 returns JSONB as a Python dict, but legacy data may be a JSON string."""
|
||||||
|
if isinstance(value, dict):
|
||||||
|
return value
|
||||||
|
if isinstance(value, (bytes, bytearray)):
|
||||||
|
value = value.decode("utf-8", errors="replace")
|
||||||
|
if isinstance(value, str) and value.strip():
|
||||||
|
try:
|
||||||
|
return json.loads(value)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
return {}
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _countMigrations(before: Dict[str, Any], after: Dict[str, Any]) -> int:
|
||||||
|
"""Count how many ``featureInstanceId`` values were rewritten."""
|
||||||
|
if before == after:
|
||||||
|
return 0
|
||||||
|
bnodes = before.get("nodes") if isinstance(before, dict) else None
|
||||||
|
anodes = after.get("nodes") if isinstance(after, dict) else None
|
||||||
|
if not isinstance(bnodes, list) or not isinstance(anodes, list):
|
||||||
|
return 0
|
||||||
|
count = 0
|
||||||
|
for bn, an in zip(bnodes, anodes):
|
||||||
|
bp = (bn.get("parameters") or {}) if isinstance(bn, dict) else {}
|
||||||
|
ap = (an.get("parameters") or {}) if isinstance(an, dict) else {}
|
||||||
|
if bp.get("featureInstanceId") != ap.get("featureInstanceId"):
|
||||||
|
count += 1
|
||||||
|
return count
|
||||||
|
|
||||||
|
|
||||||
|
def _migrateOneTable(
    conn,
    table: str,
    pk: str,
    *,
    dryRun: bool,
) -> Dict[str, int]:
    """Process one table; returns counts dict.

    Reads every row's ``graph`` JSONB, applies
    ``materializeFeatureInstanceRefs`` and writes back only rows whose
    featureInstanceId values actually changed. With ``dryRun=True`` nothing
    is written and nothing is committed.
    """
    counts = {"scanned": 0, "rowsChanged": 0, "fieldsRewritten": 0}
    with conn.cursor(cursor_factory=RealDictCursor) as cur:
        cur.execute(f'SELECT {pk} AS pk, "graph" AS graph FROM {table}')
        # Fetch everything up front so the read cursor is closed before the
        # per-row UPDATE cursors below are opened.
        rows: Iterable[Dict[str, Any]] = cur.fetchall()
    for row in rows:
        counts["scanned"] += 1
        before = _loadGraph(row.get("graph"))
        if not before:
            # Empty/unparseable graph — nothing to migrate.
            continue
        after = materializeFeatureInstanceRefs(before)
        if before == after:
            continue
        rewritten = _countMigrations(before, after)
        if rewritten == 0:
            # Graph changed but not in any featureInstanceId field — skip,
            # this migration only persists featureInstanceId rewrites.
            continue
        counts["rowsChanged"] += 1
        counts["fieldsRewritten"] += rewritten
        logger.info(
            "%s id=%s: %d featureInstanceId value(s) %s",
            table,
            row["pk"],
            rewritten,
            "would be migrated [dry-run]" if dryRun else "migrated",
        )
        if not dryRun:
            with conn.cursor() as updCur:
                updCur.execute(
                    f'UPDATE {table} SET "graph" = %s WHERE {pk} = %s',
                    (Json(after), row["pk"]),
                )
    if not dryRun:
        # Single commit per table: all row updates land atomically.
        conn.commit()
    return counts
|
||||||
|
|
||||||
|
|
||||||
|
def migrate(dryRun: bool = False) -> Dict[str, Dict[str, int]]:
    """Run the featureInstanceRef migration over every tracked table.

    Returns a mapping of table name to the counts dict produced by
    ``_migrateOneTable``. The connection is always closed, even on error.
    """
    conn = _connect()
    try:
        return {
            table: _migrateOneTable(conn, table, pk, dryRun=dryRun)
            for table, pk in _TABLES_AND_PK
        }
    finally:
        conn.close()
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """CLI entry point: parse ``--dry-run``, run the migration, log a summary.

    Returns a process exit code (always 0; ``_connect`` raises SystemExit on
    missing credentials before any work happens).
    """
    parser = argparse.ArgumentParser(
        description="Persist materializeFeatureInstanceRefs into stored workflow graphs."
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Report what would be migrated without writing back.",
    )
    args = parser.parse_args()

    logger.info(
        "Starting featureInstanceRef DB migration (dry-run=%s, db=%s)",
        args.dry_run,
        _DB_NAME,
    )
    summary = migrate(dryRun=args.dry_run)
    # Aggregate per-table counts into totals for the final summary line.
    totalRows = sum(s["rowsChanged"] for s in summary.values())
    totalFields = sum(s["fieldsRewritten"] for s in summary.values())
    for table, counts in summary.items():
        logger.info(
            "%s: scanned=%d rowsChanged=%d fieldsRewritten=%d",
            table,
            counts["scanned"],
            counts["rowsChanged"],
            counts["fieldsRewritten"],
        )
    logger.info(
        "%s: %d row(s) %s, %d featureInstanceId value(s) total.",
        "Dry-run summary" if args.dry_run else "Migration summary",
        totalRows,
        "would be updated" if args.dry_run else "updated",
        totalFields,
    )
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||||
2
tests/integration/automation2/__init__.py
Normal file
2
tests/integration/automation2/__init__.py
Normal file
|
|
@ -0,0 +1,2 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""Integration tests for automation2 typed bindings (Phase-5 Schicht-4)."""
|
||||||
189
tests/integration/automation2/test_pick_not_push_migration_v2.py
Normal file
189
tests/integration/automation2/test_pick_not_push_migration_v2.py
Normal file
|
|
@ -0,0 +1,189 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Phase-5 Schicht-4 integration test (T11): the typed-bindings pipeline must
|
||||||
|
produce identical action-call parameters whether a workflow stores
|
||||||
|
``featureInstanceId`` as a legacy raw UUID or as a typed
|
||||||
|
``FeatureInstanceRef`` envelope.
|
||||||
|
|
||||||
|
The pipeline under test::
|
||||||
|
|
||||||
|
saved graph
|
||||||
|
-> materializeFeatureInstanceRefs (Phase-5, this test)
|
||||||
|
-> materializeConnectionRefs (existing pick-not-push helper)
|
||||||
|
-> resolveParameterReferences (typed bindings + envelope unwrap)
|
||||||
|
-> action params (what the action implementation would receive)
|
||||||
|
|
||||||
|
This is the integration counterpart to the focused unit tests in
|
||||||
|
``tests/unit/workflows/test_featureInstanceRefMigration.py``.
|
||||||
|
|
||||||
|
Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md``.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.workflows.automation2.featureInstanceRefMigration import (
|
||||||
|
materializeFeatureInstanceRefs,
|
||||||
|
)
|
||||||
|
from modules.workflows.automation2.graphUtils import resolveParameterReferences
|
||||||
|
from modules.workflows.automation2.pickNotPushMigration import materializeConnectionRefs
|
||||||
|
|
||||||
|
|
||||||
|
_TRUSTEE_INSTANCE_UUID = "f1e2d3c4-b5a6-7890-1234-567890abcdef"
|
||||||
|
|
||||||
|
|
||||||
|
def _resolveActionParams(graph: Dict[str, Any], nodeId: str) -> Dict[str, Any]:
    """Run the full Schicht-4 pipeline on *graph* and return the resolved
    parameters for node *nodeId* — i.e. what ``ActionNodeExecutor`` would
    forward to ``ActionExecutor.executeAction``."""
    migrated = materializeConnectionRefs(materializeFeatureInstanceRefs(graph))
    node = next(n for n in migrated["nodes"] if n["id"] == nodeId)
    params = dict(node.get("parameters") or {})
    return resolveParameterReferences(params, nodeOutputs={})
|
||||||
|
|
||||||
|
|
||||||
|
def _legacyTrusteeGraph() -> Dict[str, Any]:
    """Trustee Spesenbelege-shape graph with raw UUIDs (pre-migration)."""

    def _docRef(sourceNodeId: str) -> Dict[str, Any]:
        # Pick-not-Push DataRef onto a node's ``documents`` output port.
        return {"type": "ref", "nodeId": sourceNodeId, "path": ["documents"]}

    nodes = [
        {"id": "n1", "type": "trigger.manual", "parameters": {}},
        {
            "id": "n5",
            "type": "trustee.extractFromFiles",
            "parameters": {
                "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
                "prompt": "extract expenses",
            },
        },
        {
            "id": "n6",
            "type": "trustee.processDocuments",
            "parameters": {
                "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
                "documentList": _docRef("n5"),
            },
        },
        {
            "id": "n7",
            "type": "trustee.syncToAccounting",
            "parameters": {
                "featureInstanceId": _TRUSTEE_INSTANCE_UUID,
                "documentList": _docRef("n6"),
            },
        },
    ]
    connections = [
        {"source": src, "target": dst}
        for src, dst in (("n1", "n5"), ("n5", "n6"), ("n6", "n7"))
    ]
    return {"nodes": nodes, "connections": connections}
|
||||||
|
|
||||||
|
|
||||||
|
def _migratedTrusteeGraph() -> Dict[str, Any]:
    """The same graph but already in the migrated (typed envelope) shape."""
    graph = _legacyTrusteeGraph()
    for node in graph["nodes"]:
        if not node.get("type", "").startswith("trustee."):
            continue
        # Fresh dict per node, so no envelope instance is shared between nodes.
        node["parameters"]["featureInstanceId"] = {
            "$type": "FeatureInstanceRef",
            "id": _TRUSTEE_INSTANCE_UUID,
            "featureCode": "trustee",
        }
    return graph
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Round-trip: legacy + migrated graphs produce identical action params
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestTrusteeBindingsPipeline:
    """Legacy and migrated trustee graphs must be indistinguishable once the
    Schicht-4 pipeline has run (see module docstring)."""

    @pytest.mark.parametrize("nodeId", ["n5", "n6", "n7"])
    def test_legacyAndMigratedGraphsResolveToSameFeatureInstanceId(self, nodeId):
        """Both graph shapes resolve to the same bare UUID on every trustee node."""
        legacyParams = _resolveActionParams(_legacyTrusteeGraph(), nodeId)
        migratedParams = _resolveActionParams(_migratedTrusteeGraph(), nodeId)
        assert legacyParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
        assert migratedParams["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
        assert legacyParams == migratedParams

    def test_legacyGraphIsConvertedToTypedEnvelopeInPlaceOfRawUuid(self):
        """materializeFeatureInstanceRefs wraps raw UUIDs in typed envelopes."""
        legacy = _legacyTrusteeGraph()
        migrated = materializeFeatureInstanceRefs(legacy)
        for node in migrated["nodes"]:
            if not node.get("type", "").startswith("trustee."):
                continue
            param = node["parameters"]["featureInstanceId"]
            assert isinstance(param, dict), f"node {node['id']} not migrated"
            assert param["$type"] == "FeatureInstanceRef"
            assert param["id"] == _TRUSTEE_INSTANCE_UUID
            assert param["featureCode"] == "trustee"

    def test_migrationIsIdempotentAcrossPipeline(self):
        """Re-running the migration on already-migrated output is a no-op."""
        once = materializeFeatureInstanceRefs(_legacyTrusteeGraph())
        twice = materializeFeatureInstanceRefs(once)
        assert once == twice

    def test_otherParamsArePreservedAcrossMigration(self):
        """Only featureInstanceId is rewritten; sibling parameters stay intact."""
        legacy = _legacyTrusteeGraph()
        migrated = materializeFeatureInstanceRefs(legacy)
        n5 = next(n for n in migrated["nodes"] if n["id"] == "n5")
        assert n5["parameters"]["prompt"] == "extract expenses"
        n6 = next(n for n in migrated["nodes"] if n["id"] == "n6")
        # documentList DataRef must survive untouched (only the
        # featureInstanceId key is rewritten).
        assert n6["parameters"]["documentList"] == {
            "type": "ref",
            "nodeId": "n5",
            "path": ["documents"],
        }
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Cross-feature: same migration handles redmine / clickup / sharepoint
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestCrossFeatureMigration:
    """The migration derives ``featureCode`` from the node-type prefix, so it
    must work for every feature, not just trustee."""

    @pytest.mark.parametrize(
        "nodeType,expectedCode",
        [
            ("redmine.createIssue", "redmine"),
            ("clickup.createTask", "clickup"),
            ("sharepoint.listFiles", "sharepoint"),
        ],
    )
    def test_nonTrusteeNodesAreMigratedWithCorrectFeatureCode(
        self, nodeType, expectedCode
    ):
        """Each feature's node gets an envelope carrying that feature's code."""
        graph = {
            "nodes": [
                {
                    "id": "n",
                    "type": nodeType,
                    "parameters": {"featureInstanceId": "uuid-x"},
                }
            ]
        }
        out = materializeFeatureInstanceRefs(graph)
        env = out["nodes"][0]["parameters"]["featureInstanceId"]
        assert env == {
            "$type": "FeatureInstanceRef",
            "id": "uuid-x",
            "featureCode": expectedCode,
        }
        # And the resolver still hands back the raw UUID for legacy actions.
        resolved = resolveParameterReferences(env, nodeOutputs={})
        assert resolved == "uuid-x"
|
||||||
4
tests/integration/trustee/__init__.py
Normal file
4
tests/integration/trustee/__init__.py
Normal file
|
|
@ -0,0 +1,4 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Trustee feature integration tests.
|
||||||
474
tests/integration/trustee/test_spesenbelege_workflow_e2e.py
Normal file
474
tests/integration/trustee/test_spesenbelege_workflow_e2e.py
Normal file
|
|
@ -0,0 +1,474 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Plan #2 Track A2 (T4): Trustee Spesenbelege Live-E2E Integration-Test.
|
||||||
|
|
||||||
|
Runs the canonical Trustee Spesenbelege chain end-to-end through
|
||||||
|
``executeGraph``::
|
||||||
|
|
||||||
|
trigger.manual
|
||||||
|
-> trustee.processDocuments (real action)
|
||||||
|
-> trustee.syncToAccounting (real action)
|
||||||
|
|
||||||
|
with:
|
||||||
|
|
||||||
|
* an in-memory **TrusteeInterface** fake (records createDocument /
|
||||||
|
createPosition / updatePosition calls and assigns deterministic IDs),
|
||||||
|
* an in-memory **AccountingBridge** fake (records pushBatchToAccounting
|
||||||
|
calls and returns one success result per positionId),
|
||||||
|
* a literal upstream ``documentList`` (no AI / SharePoint involved — the
|
||||||
|
extraction step is replaced by a canned ActionDocument list so this
|
||||||
|
test focuses on the bindings + action layer, exactly as the Track A2
|
||||||
|
plan requires: "Mock SharePoint + AI + Trustee-DB, echtes
|
||||||
|
processDocuments + syncToAccounting").
|
||||||
|
|
||||||
|
The test exercises the **Schicht-4 typed bindings pipeline** end-to-end:
|
||||||
|
|
||||||
|
* ``featureInstanceId`` is provided as a typed ``FeatureInstanceRef``
|
||||||
|
envelope on the producer node and as a raw legacy UUID on the consumer
|
||||||
|
node — both must reach the action layer as the bare UUID string after
|
||||||
|
``materializeFeatureInstanceRefs`` + ``resolveParameterReferences``.
|
||||||
|
* ``documentList`` on ``trustee.syncToAccounting`` is a ``DataRef`` on
|
||||||
|
``processDocuments[documents]`` (Pick-not-Push) — must resolve to the
|
||||||
|
ActionDocument list produced by ``processDocuments``.
|
||||||
|
|
||||||
|
Plan: ``wiki/c-work/1-plan/2026-04-typed-action-followups.md`` (A2 / T4).
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import json
|
||||||
|
import uuid
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.workflows.automation2.executionEngine import executeGraph
|
||||||
|
from modules.workflows.automation2.runEnvelope import default_run_envelope
|
||||||
|
|
||||||
|
|
||||||
|
_TRUSTEE_INSTANCE_UUID = "11111111-2222-3333-4444-555555555555"
|
||||||
|
_MANDATE_ID = "mandate-zh-001"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# In-memory fakes for the Trustee feature
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeTrusteeDocument:
|
||||||
|
"""Minimal stand-in for ``TrusteeDocument`` with attribute access."""
|
||||||
|
|
||||||
|
def __init__(self, payload: Dict[str, Any]):
|
||||||
|
self.id = str(uuid.uuid4())
|
||||||
|
for k, v in payload.items():
|
||||||
|
setattr(self, k, v)
|
||||||
|
|
||||||
|
def model_dump(self) -> Dict[str, Any]:
|
||||||
|
return {k: v for k, v in self.__dict__.items()}
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeTrusteePosition:
|
||||||
|
"""Minimal stand-in for ``TrusteePosition`` with attribute access."""
|
||||||
|
|
||||||
|
def __init__(self, payload: Dict[str, Any]):
|
||||||
|
self.id = str(uuid.uuid4())
|
||||||
|
for k, v in payload.items():
|
||||||
|
setattr(self, k, v)
|
||||||
|
|
||||||
|
def model_dump(self) -> Dict[str, Any]:
|
||||||
|
return {k: v for k, v in self.__dict__.items()}
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeTrusteeDb:
|
||||||
|
"""Captures ``getRecordset`` calls so processDocuments' bank-match
|
||||||
|
auto-linking path can be exercised without a real DB."""
|
||||||
|
|
||||||
|
def __init__(self, positions: List[_FakeTrusteePosition]):
|
||||||
|
self._positions = positions
|
||||||
|
self.calls: List[Dict[str, Any]] = []
|
||||||
|
|
||||||
|
def getRecordset(self, model, recordFilter=None):
|
||||||
|
self.calls.append({"model": getattr(model, "__name__", str(model)),
|
||||||
|
"filter": recordFilter})
|
||||||
|
return list(self._positions)
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeTrusteeInterface:
    """In-memory replacement for the live trustee interface.

    Records every created document/position and every position patch so the
    tests can assert on what the actions did.
    """

    def __init__(self, mandateId: str, featureInstanceId: str):
        self.mandateId = mandateId
        self.featureInstanceId = featureInstanceId
        self.documents: List[_FakeTrusteeDocument] = []
        self.positions: List[_FakeTrusteePosition] = []
        self.updates: List[Dict[str, Any]] = []
        # The fake DB shares the positions list, so positions created later
        # are visible to getRecordset() without extra wiring.
        self.db = _FakeTrusteeDb(self.positions)

    def createDocument(self, payload: Dict[str, Any]) -> _FakeTrusteeDocument:
        documentFields = {
            key: payload.get(key)
            for key in (
                "fileId",
                "documentName",
                "documentMimeType",
                "sourceType",
                "documentType",
            )
        }
        documentFields["mandateId"] = self.mandateId
        documentFields["featureInstanceId"] = self.featureInstanceId
        document = _FakeTrusteeDocument(documentFields)
        self.documents.append(document)
        return document

    def createPosition(self, payload: Dict[str, Any]) -> _FakeTrusteePosition:
        position = _FakeTrusteePosition(dict(payload))
        self.positions.append(position)
        return position

    def updatePosition(self, positionId: str, patch: Dict[str, Any]) -> Optional[_FakeTrusteePosition]:
        self.updates.append({"id": positionId, "patch": dict(patch)})
        matched = next(
            (p for p in self.positions if getattr(p, "id", None) == positionId),
            None,
        )
        if matched is not None:
            for key, value in patch.items():
                setattr(matched, key, value)
        return matched
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeAccountingResult:
|
||||||
|
def __init__(self, success: bool = True, errorMessage: Optional[str] = None):
|
||||||
|
self.success = success
|
||||||
|
self.errorMessage = errorMessage
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeAccountingBridge:
    """Records ``pushBatchToAccounting`` invocations and reports one success
    result per positionId."""

    # Class-level so the recording survives re-instantiation by the code
    # under test; the autouse fixture resets it between tests.
    pushBatchCalls: List[Dict[str, Any]] = []

    def __init__(self, trusteeInterface):
        self.trusteeInterface = trusteeInterface

    async def pushBatchToAccounting(self, featureInstanceId: str,
                                    positionIds: List[str]):
        callRecord = {
            "featureInstanceId": featureInstanceId,
            "positionIds": list(positionIds),
        }
        type(self).pushBatchCalls.append(callRecord)
        return [_FakeAccountingResult(success=True) for _ in range(len(positionIds))]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Test fixtures: mock services + module-level patches
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def trusteeInterface():
    """Fresh in-memory trustee interface bound to the canonical test mandate."""
    return _FakeTrusteeInterface(_MANDATE_ID, _TRUSTEE_INSTANCE_UUID)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
def resetAccountingBridgeCalls():
    """Isolate tests from each other: the bridge records its calls on a
    class attribute, so clear it before and after every test."""
    _FakeAccountingBridge.pushBatchCalls = []
    yield
    _FakeAccountingBridge.pushBatchCalls = []
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def patchTrustee(monkeypatch, trusteeInterface):
    """Patches ``getInterface`` + ``AccountingBridge`` in both action
    modules so the real action code runs against the in-memory fakes."""
    # NOTE(review): _procMod/_syncMod are imported but not patched directly —
    # presumably importing them ensures the action modules are loaded and
    # resolve the patched names via _ifaceMod/_bridgeMod; confirm.
    from modules.workflows.methods.methodTrustee.actions import (
        processDocuments as _procMod,
        syncToAccounting as _syncMod,
    )
    from modules.features.trustee import (
        interfaceFeatureTrustee as _ifaceMod,
    )
    from modules.features.trustee.accounting import accountingBridge as _bridgeMod

    def _fakeGetInterface(*_args, **_kwargs):
        # Ignore whatever lookup arguments the actions pass; always hand back
        # the shared in-memory fake so tests can inspect its state.
        return trusteeInterface

    monkeypatch.setattr(_ifaceMod, "getInterface", _fakeGetInterface, raising=True)
    monkeypatch.setattr(_bridgeMod, "AccountingBridge", _FakeAccountingBridge, raising=True)
    return trusteeInterface
|
||||||
|
|
||||||
|
|
||||||
|
def _services():
    """Minimal services container for executeGraph.

    The ``ActionExecutor`` only needs ``services`` to be passed through to
    the trustee actions, which read ``mandateId`` / ``featureInstanceId``
    directly (everything else arrives via ``parameters``); ``chat`` is a
    fallback this test never exercises.
    """

    class _ServicesStub:
        def __init__(self):
            self.mandateId = _MANDATE_ID
            self.featureInstanceId = _TRUSTEE_INSTANCE_UUID
            self.user = None
            self.chat = None

    return _ServicesStub()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Canned upstream extraction result
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _expenseReceiptExtraction() -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"documentType": "EXPENSE_RECEIPT",
|
||||||
|
"fileId": "file-001",
|
||||||
|
"fileName": "tankbeleg.pdf",
|
||||||
|
"extractedData": [
|
||||||
|
{
|
||||||
|
"documentType": "expense_receipt",
|
||||||
|
"valuta": "2026-04-12",
|
||||||
|
"transactionDateTime": 1744675200,
|
||||||
|
"company": "Migrolino Tankstelle Zürich AG",
|
||||||
|
"desc": "Tankfüllung Bleifrei 95, 42.30 L à 1.799 CHF/L",
|
||||||
|
"bookingCurrency": "CHF",
|
||||||
|
"bookingAmount": "76.10",
|
||||||
|
"originalCurrency": "CHF",
|
||||||
|
"originalAmount": "76.10",
|
||||||
|
"vatPercentage": "8.1",
|
||||||
|
"vatAmount": "5.71",
|
||||||
|
"debitAccountNumber": "6200 Fahrzeugaufwand",
|
||||||
|
"creditAccountNumber": "1020 Bank",
|
||||||
|
"tags": ["fuel", "vehicle"],
|
||||||
|
"bookingReference": "RB-2026-04-12-001",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _bankDocumentExtraction() -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"documentType": "BANK_DOCUMENT",
|
||||||
|
"fileId": "file-002",
|
||||||
|
"fileName": "kontoauszug_april.pdf",
|
||||||
|
"extractedData": [
|
||||||
|
{
|
||||||
|
"documentType": "bank_document",
|
||||||
|
"valuta": "2026-04-13",
|
||||||
|
"company": "Migrolino Tankstelle Zürich AG",
|
||||||
|
"desc": "Lastschrift Tankfüllung 12.04.2026, Ref RB-2026-04-12-001",
|
||||||
|
"bookingCurrency": "CHF",
|
||||||
|
"bookingAmount": "-76.10",
|
||||||
|
"creditAccountNumber": "1020 Bank",
|
||||||
|
"bookingReference": "RB-2026-04-12-001",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _cannedExtractionDocuments() -> List[Dict[str, Any]]:
    """Two ActionDocument-shaped dicts: one expense receipt + one bank
    document. processDocuments' ``_resolveDocumentList`` accepts this
    shape directly when ``documentName`` / ``documentData`` are present."""
    canned = [
        ("tankbeleg.json", _expenseReceiptExtraction()),
        ("kontoauszug_april.json", _bankDocumentExtraction()),
    ]
    return [
        {
            "documentName": name,
            "documentData": json.dumps(payload),
            "mimeType": "application/json",
        }
        for name, payload in canned
    ]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Graph builder
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _buildGraph(featureInstanceIdOnProcess, featureInstanceIdOnSync) -> Dict[str, Any]:
    """Trustee Spesenbelege chain.

    The ``trigger.manual`` node emits an ``ActionResult`` port, which is
    not assignable into ``trustee.processDocuments[in:0]`` (accepts only
    ``DocumentList`` / ``Transit``). Production graphs solve this by
    going through ``trustee.extractFromFiles`` (DocumentList output)
    first; this test bypasses that step (we ship a literal canned
    extraction list instead of running AI/SharePoint), so we simply
    leave ``trigger.manual`` orphaned and start the data plane at
    ``process``."""
    triggerNode = {"id": "trigger", "type": "trigger.manual", "parameters": {}}
    processNode = {
        "id": "process",
        "type": "trustee.processDocuments",
        "parameters": {
            "featureInstanceId": featureInstanceIdOnProcess,
            "documentList": _cannedExtractionDocuments(),
        },
    }
    syncNode = {
        "id": "sync",
        "type": "trustee.syncToAccounting",
        "parameters": {
            "featureInstanceId": featureInstanceIdOnSync,
            # Pull the processed documents straight out of the process node.
            "documentList": {
                "type": "ref",
                "nodeId": "process",
                "path": ["documents"],
            },
        },
    }
    return {
        "nodes": [triggerNode, processNode, syncNode],
        "connections": [
            {"source": "process", "target": "sync"},
        ],
    }
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestSpesenbelegeEndToEnd:
    """End-to-end Trustee Spesenbelege graph through executeGraph."""

    @pytest.mark.asyncio
    async def test_processAndSyncWritesDocumentsPositionsAndAccountingPush(
        self, patchTrustee
    ):
        """Happy-path: 1 expense receipt + 1 bank document.

        Asserts at all three layers: bindings, action results, and side
        effects on the (faked) trustee + accounting infrastructure."""
        trustee = patchTrustee
        # Typed Schicht-4 envelope form of the instance id; the runtime
        # must unwrap it back to the raw UUID before hitting trustee.
        envelope = {
            "$type": "FeatureInstanceRef",
            "id": _TRUSTEE_INSTANCE_UUID,
            "featureCode": "trustee",
        }
        graph = _buildGraph(
            featureInstanceIdOnProcess=copy.deepcopy(envelope),
            featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
        )
        runEnvelope = default_run_envelope("manual", payload={})

        result = await executeGraph(
            graph,
            services=_services(),
            run_envelope=runEnvelope,
            userId="test-user",
            mandateId=_MANDATE_ID,
            instanceId=_TRUSTEE_INSTANCE_UUID,
        )

        assert result.get("success") is True, result

        # --- Layer 1: bindings — both nodes must see the unwrapped UUID ---
        assert len(trustee.documents) == 2
        for doc in trustee.documents:
            assert doc.featureInstanceId == _TRUSTEE_INSTANCE_UUID

        # --- Layer 2: action results -----------------------------------
        nodeOutputs = result["nodeOutputs"]
        processOut = nodeOutputs["process"]
        assert processOut.get("success") is True
        assert processOut.get("error") in (None, "", False)
        assert isinstance(processOut.get("documents"), list)
        assert len(processOut["documents"]) == 1
        processedDoc = processOut["documents"][0]
        assert processedDoc.get("documentName") == "process_documents_result.json"
        payload = json.loads(processedDoc["documentData"])
        assert len(payload["documentIds"]) == 2
        assert len(payload["positionIds"]) == 2
        # Bank document auto-link found the matching expense (same
        # bookingReference RB-2026-04-12-001), so exactly one position
        # was matched.
        assert len(payload["autoMatchedPositionIds"]) == 1

        syncOut = nodeOutputs["sync"]
        assert syncOut.get("success") is True
        assert syncOut.get("error") in (None, "", False)
        syncDoc = syncOut["documents"][0]
        syncSummary = json.loads(syncDoc["documentData"])
        assert syncSummary["pushed"] == 2
        assert syncSummary["total"] == 2
        assert all(r["success"] is True for r in syncSummary["results"])

        # --- Layer 3: side effects -------------------------------------
        assert len(trustee.positions) == 2
        # Bank document update propagated through updatePosition
        assert len(trustee.updates) == 1
        assert "bankDocumentId" in trustee.updates[0]["patch"]

        # Accounting bridge was called once with the resolved positionIds
        # and the unwrapped UUID, NOT the typed envelope.
        assert len(_FakeAccountingBridge.pushBatchCalls) == 1
        call = _FakeAccountingBridge.pushBatchCalls[0]
        assert call["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID
        assert sorted(call["positionIds"]) == sorted(payload["positionIds"])

    @pytest.mark.asyncio
    async def test_legacyRawUuidFeatureInstanceIdAlsoWorks(self, patchTrustee):
        """A pre-Schicht-4 graph storing ``featureInstanceId`` as a raw
        UUID must produce the same end-to-end behaviour after the
        runtime ``materializeFeatureInstanceRefs`` migration."""
        trustee = patchTrustee
        graph = _buildGraph(
            featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID,
            featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
        )
        result = await executeGraph(
            graph,
            services=_services(),
            run_envelope=default_run_envelope("manual", payload={}),
            userId="test-user",
            mandateId=_MANDATE_ID,
            instanceId=_TRUSTEE_INSTANCE_UUID,
        )
        assert result.get("success") is True, result
        assert len(trustee.documents) == 2
        assert len(trustee.positions) == 2
        assert _FakeAccountingBridge.pushBatchCalls[0]["featureInstanceId"] == _TRUSTEE_INSTANCE_UUID

    @pytest.mark.asyncio
    async def test_emptyExtractionListIsHandledGracefully(self, patchTrustee):
        """When processDocuments receives no documents, syncToAccounting
        must surface a graceful "No positionIds in document" message and
        never call the accounting bridge."""
        trustee = patchTrustee
        graph = _buildGraph(
            featureInstanceIdOnProcess=_TRUSTEE_INSTANCE_UUID,
            featureInstanceIdOnSync=_TRUSTEE_INSTANCE_UUID,
        )
        # Replace the canned documents with a no-records extraction.
        emptyExtraction = {
            "documentType": "EXPENSE_RECEIPT",
            "fileId": "file-empty",
            "fileName": "empty.json",
            "extractedData": [],
        }
        graph["nodes"][1]["parameters"]["documentList"] = [{
            "documentName": "empty.json",
            "documentData": json.dumps(emptyExtraction),
            "mimeType": "application/json",
        }]
        result = await executeGraph(
            graph,
            services=_services(),
            run_envelope=default_run_envelope("manual", payload={}),
            userId="test-user",
            mandateId=_MANDATE_ID,
            instanceId=_TRUSTEE_INSTANCE_UUID,
        )
        # Graceful no-op: the run succeeds, nothing is persisted, and the
        # accounting bridge is never invoked.
        assert result.get("success") is True, result
        assert len(trustee.documents) == 0
        assert len(trustee.positions) == 0
        syncSummary = json.loads(
            result["nodeOutputs"]["sync"]["documents"][0]["documentData"]
        )
        assert syncSummary["pushed"] == 0
        assert _FakeAccountingBridge.pushBatchCalls == []
|
||||||
|
|
@ -0,0 +1,9 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
from modules.workflows.automation2.executors.actionNodeExecutor import _buildConnectionRefDict
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_connection_ref_dict_from_logical_string():
    """A logical 'connection:<authority>:<address>' string is turned into a
    ConnectionRef dict carrying the authority and the original label."""
    ref = _buildConnectionRefDict("connection:msft:user@example.com", None, None)
    assert ref is not None
    assert ref["authority"] == "msft"
    assert ref["label"] == "connection:msft:user@example.com"
|
||||||
352
tests/unit/graphicalEditor/test_adapter_validator.py
Normal file
352
tests/unit/graphicalEditor/test_adapter_validator.py
Normal file
|
|
@ -0,0 +1,352 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Tests for the Schicht-3 Adapter Validator (Phase 3).
|
||||||
|
|
||||||
|
Validates the 5 drift rules between Editor-Node Adapters and the
|
||||||
|
Schicht-2 Actions they bind to:
|
||||||
|
|
||||||
|
Rule 1: every userParams.actionArg exists in the Action
|
||||||
|
Rule 2: every required Action arg is covered (userParams or contextParams)
|
||||||
|
Rule 3: every Action parameter type exists in PORT_TYPE_CATALOG
|
||||||
|
Rule 4: Action outputType exists in PORT_TYPE_CATALOG
|
||||||
|
Rule 5: every Action with dynamicMode=False has an Editor adapter
|
||||||
|
|
||||||
|
Plus a healthy-state test that runs the validator against the live
|
||||||
|
STATIC_NODE_TYPES + every shipping Method instance, and asserts no drift.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
import sys
|
||||||
|
import types
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.datamodels.datamodelWorkflowActions import (
|
||||||
|
WorkflowActionDefinition,
|
||||||
|
WorkflowActionParameter,
|
||||||
|
)
|
||||||
|
from modules.features.graphicalEditor.adapterValidator import (
|
||||||
|
AdapterValidationReport,
|
||||||
|
_buildActionsRegistryFromMethods,
|
||||||
|
_formatAdapterReport,
|
||||||
|
_validateAdapterAgainstAction,
|
||||||
|
_validateAllAdapters,
|
||||||
|
)
|
||||||
|
from modules.features.graphicalEditor.nodeAdapter import (
|
||||||
|
NodeAdapter,
|
||||||
|
UserParamMapping,
|
||||||
|
)
|
||||||
|
from modules.shared.frontendTypes import FrontendType
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Test factories
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _makeParam(typeStr: str, *, required: bool = False, **kwargs) -> WorkflowActionParameter:
    """Build a minimal WorkflowActionParameter of the given type.

    Any keyword argument overrides the corresponding default field.
    """
    fields = dict(
        name="p",
        type=typeStr,
        frontendType=FrontendType.TEXT,
        required=required,
        description="",
    )
    fields.update(kwargs)
    return WorkflowActionParameter(**fields)
|
||||||
|
|
||||||
|
|
||||||
|
def _makeAction(
    actionId: str = "trustee.processDocuments",
    parameters: dict | None = None,
    outputType: str = "TrusteeProcessResult",
    dynamicMode: bool = False,
) -> WorkflowActionDefinition:
    """Build a minimal WorkflowActionDefinition with a no-op execute."""
    def _noop(*args, **kwargs):
        return None

    return WorkflowActionDefinition(
        actionId=actionId,
        description="t",
        parameters=parameters or {},
        outputType=outputType,
        dynamicMode=dynamicMode,
        execute=_noop,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _makeAdapter(
    *,
    userArgs: list[str] | None = None,
    contextArgs: list[str] | None = None,
) -> NodeAdapter:
    """Build a trustee.processDocuments adapter with the given mappings.

    ``userArgs`` become userParams entries; ``contextArgs`` become
    ``$session.<name>`` contextParams entries.
    """
    userMappings = [UserParamMapping(actionArg=arg) for arg in (userArgs or [])]
    contextMapping = {arg: f"$session.{arg}" for arg in (contextArgs or [])}
    return NodeAdapter(
        nodeId="trustee.processDocuments",
        bindsAction="trustee.processDocuments",
        category="trustee",
        label="Verarbeiten",
        description="...",
        userParams=userMappings,
        contextParams=contextMapping,
    )
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Per-rule unit tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestRule1_UserParamArgExistsInAction:
    # Rule 1: every userParams.actionArg must exist in the bound Action.

    def test_okWhenAllArgsExist(self):
        action = _makeAction(parameters={
            "documentList": _makeParam("DocumentList", required=True),
            "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
        })
        adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId"])
        report = _validateAdapterAgainstAction(adapter, action)
        assert report.isHealthy, report.errors

    def test_failsWhenAdapterReferencesUnknownArg(self):
        # 'ghostArg' is not an action parameter, so rule 1 must report it.
        action = _makeAction(parameters={"documentList": _makeParam("DocumentList", required=True),
                                         "featureInstanceId": _makeParam("FeatureInstanceRef", required=True)})
        adapter = _makeAdapter(userArgs=["documentList", "featureInstanceId", "ghostArg"])
        report = _validateAdapterAgainstAction(adapter, action)
        assert any("ghostArg" in e for e in report.errors)
|
||||||
|
|
||||||
|
|
||||||
|
class TestRule2_RequiredArgsCovered:
    # Rule 2: every required Action arg must be supplied either by the
    # user (userParams) or injected from context (contextParams).

    def test_failsWhenRequiredArgMissing(self):
        action = _makeAction(parameters={
            "documentList": _makeParam("DocumentList", required=True),
            "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
        })
        adapter = _makeAdapter(userArgs=["documentList"])  # missing featureInstanceId
        report = _validateAdapterAgainstAction(adapter, action)
        assert any("featureInstanceId" in e for e in report.errors)

    def test_okWhenRequiredArgInContext(self):
        # Coverage via contextParams counts the same as userParams.
        action = _makeAction(parameters={
            "documentList": _makeParam("DocumentList", required=True),
            "mandateId": _makeParam("str", required=True),
        })
        adapter = _makeAdapter(userArgs=["documentList"], contextArgs=["mandateId"])
        report = _validateAdapterAgainstAction(adapter, action)
        assert report.isHealthy, report.errors

    def test_optionalArgMayBeUnset(self):
        # Only required=True args need coverage.
        action = _makeAction(parameters={
            "documentList": _makeParam("DocumentList", required=True),
            "prompt": _makeParam("str", required=False),
        })
        adapter = _makeAdapter(userArgs=["documentList"])
        report = _validateAdapterAgainstAction(adapter, action)
        assert report.isHealthy, report.errors
|
||||||
|
|
||||||
|
|
||||||
|
class TestRule3_ActionParamTypesInCatalog:
    # Rule 3: every Action parameter type must resolve in PORT_TYPE_CATALOG.

    def test_failsForUnknownType(self):
        action = _makeAction(parameters={"documentList": _makeParam("Foobar", required=True)})
        adapter = _makeAdapter(userArgs=["documentList"])
        report = _validateAdapterAgainstAction(adapter, action)
        assert any("Foobar" in e for e in report.errors)
|
||||||
|
|
||||||
|
|
||||||
|
class TestRule4_OutputTypeInCatalog:
    # Rule 4: the Action's outputType must resolve in PORT_TYPE_CATALOG.

    def test_failsForUnknownOutputType(self):
        action = _makeAction(outputType="Nonsense")
        adapter = _makeAdapter()
        report = _validateAdapterAgainstAction(adapter, action)
        assert any("Nonsense" in e for e in report.errors)

    def test_okForActionResult(self):
        # 'ActionResult' must be a known catalog type — no error expected.
        action = _makeAction(outputType="ActionResult")
        adapter = _makeAdapter()
        report = _validateAdapterAgainstAction(adapter, action)
        assert report.isHealthy, report.errors
|
||||||
|
|
||||||
|
|
||||||
|
class TestRule5_OrphanActionsAcrossRegistry:
    # Rule 5: every Action with dynamicMode=False needs an Editor adapter.
    # Orphans surface as warnings, not errors.

    def test_warnsForActionWithoutAdapter(self):
        action = _makeAction(actionId="trustee.queryData")
        registry = {"trustee": {"queryData": action}}
        report = _validateAllAdapters([], registry)
        assert any("trustee.queryData" in w for w in report.warnings)

    def test_dynamicModeActionDoesNotWarn(self):
        # dynamicMode actions are exempt from the adapter requirement.
        action = _makeAction(actionId="trustee.queryData", dynamicMode=True)
        registry = {"trustee": {"queryData": action}}
        report = _validateAllAdapters([], registry)
        assert report.warnings == []
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Aggregator + report formatter
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestValidateAllAdapters:
    # Aggregator: projects legacy node dicts to adapters and validates each
    # against the registry entry addressed by (_method, _action).

    def test_passesWithFullCoverage(self):
        node = {
            "id": "trustee.processDocuments",
            "category": "trustee",
            "label": "X", "description": "Y",
            "parameters": [
                {"name": "documentList", "type": "DocumentList"},
                {"name": "featureInstanceId", "type": "FeatureInstanceRef"},
            ],
            "inputs": 1, "outputs": 1,
            "inputPorts": {0: {"accepts": ["DocumentList"]}},
            "_method": "trustee", "_action": "processDocuments",
        }
        action = _makeAction(parameters={
            "documentList": _makeParam("DocumentList", required=True),
            "featureInstanceId": _makeParam("FeatureInstanceRef", required=True),
        })
        registry = {"trustee": {"processDocuments": action}}
        report = _validateAllAdapters([node], registry)
        assert report.isHealthy, report.errors

    def test_reportsMissingAction(self):
        # _action names something absent from the registry bucket.
        node = {
            "id": "trustee.processDocuments",
            "_method": "trustee", "_action": "ghostAction",
            "parameters": [], "inputs": 0,
        }
        report = _validateAllAdapters([node], {"trustee": {}})
        assert any("ghostAction" in e for e in report.errors)
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatReport:
    # _formatAdapterReport renders a human-readable report summary.

    def test_healthy(self):
        out = _formatAdapterReport(AdapterValidationReport())
        assert "healthy" in out.lower()

    def test_withErrorsAndWarnings(self):
        rep = AdapterValidationReport(errors=["e1"], warnings=["w1"])
        out = _formatAdapterReport(rep)
        assert "ERROR" in out and "WARN" in out
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Healthy-state: live methods + STATIC_NODE_TYPES
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class _NullRbac:
|
||||||
|
def getUserPermissions(self, **kwargs):
|
||||||
|
class _P:
|
||||||
|
view = read = create = update = delete = True
|
||||||
|
return _P()
|
||||||
|
|
||||||
|
|
||||||
|
class _StubServices:
    """Minimal services container so Methods can be constructed offline."""

    def __init__(self):
        self.rbac = _NullRbac()
        self.mandateId = None
        self.featureInstanceId = None
        # Anonymous user object carrying only the attributes Methods read.
        self.user = type("U", (), {"id": "test-user", "roleLabels": []})()
|
||||||
|
|
||||||
|
|
||||||
|
def _ensureOptionalDeps():
|
||||||
|
class _AnyAttrModule(types.ModuleType):
|
||||||
|
def __getattr__(self, name):
|
||||||
|
return type(name, (), {})
|
||||||
|
|
||||||
|
for name in ("aiohttp",):
|
||||||
|
if name not in sys.modules:
|
||||||
|
sys.modules[name] = _AnyAttrModule(name)
|
||||||
|
|
||||||
|
|
||||||
|
# (modulePath, className, shortName) triples for every shipping Method.
# _instantiateLiveMethods imports each module and registers the created
# instance under shortName; entries that fail to import are skipped there.
_LIVE_METHODS = [
    ("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee", "trustee"),
    ("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine", "redmine"),
    ("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint", "sharepoint"),
    ("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook", "outlook"),
    ("modules.workflows.methods.methodAi.methodAi", "MethodAi", "ai"),
    ("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup", "clickup"),
    ("modules.workflows.methods.methodFile.methodFile", "MethodFile", "file"),
    ("modules.workflows.methods.methodContext.methodContext", "MethodContext", "context"),
]
|
||||||
|
|
||||||
|
|
||||||
|
def _instantiateLiveMethods() -> dict:
    """Best-effort instantiation of every shipping Method with stub services.

    Returns {shortName: instance}. Methods that can't be instantiated in the
    test env (missing dependencies) are skipped silently — Phase 2 has its
    own healthy-state test that catches per-method drift.
    """
    _ensureOptionalDeps()
    out: dict = {}
    for modulePath, className, shortName in _LIVE_METHODS:
        try:
            module = importlib.import_module(modulePath)
            cls = getattr(module, className, None)
            if cls is None:
                continue
            instance = cls(_StubServices())
            out[shortName] = instance
        except Exception:
            # Best-effort by design: any import/construction failure simply
            # drops this method from the registry under test.
            continue
    return out
|
||||||
|
|
||||||
|
|
||||||
|
# Snapshot of pre-Phase-3 drift discovered when the validator was first run
|
||||||
|
# against the live STATIC_NODE_TYPES + live Method registry.
|
||||||
|
#
|
||||||
|
# After Phase-4 Adapter-Drift-Cleanup (Plan #4) this set is intentionally
|
||||||
|
# empty: every Editor adapter must align cleanly with its Schicht-2 Action,
|
||||||
|
# and the regression net below now uses `assert report.errors == []`.
|
||||||
|
#
|
||||||
|
# History of removed drifts:
|
||||||
|
# wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md
|
||||||
|
#
|
||||||
|
# Rule: this set MUST stay empty. New drift => fix the adapter or the action,
|
||||||
|
# not the snapshot.
|
||||||
|
_KNOWN_ADAPTER_DRIFTS: frozenset[tuple[str, str]] = frozenset()
|
||||||
|
|
||||||
|
|
||||||
|
def _extractDriftKey(errorMessage: str) -> tuple[str, str] | None:
|
||||||
|
"""Parse a validator error message into a (nodeId, fieldName) drift key.
|
||||||
|
|
||||||
|
Recognises both rule-1 ("userParams.actionArg 'X' does not exist…") and
|
||||||
|
rule-2 ("required action arg 'X' is neither in userParams…") patterns.
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
m = re.search(
|
||||||
|
r"adapter '([^']+)' bindsAction '[^']+': userParams\.actionArg '([^']+)'",
|
||||||
|
errorMessage,
|
||||||
|
)
|
||||||
|
if m:
|
||||||
|
return (m.group(1), m.group(2))
|
||||||
|
m = re.search(
|
||||||
|
r"adapter '([^']+)' bindsAction '[^']+': required action arg '([^']+)'",
|
||||||
|
errorMessage,
|
||||||
|
)
|
||||||
|
if m:
|
||||||
|
return (m.group(1), m.group(2))
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def test_staticNodesHaveNoDriftAgainstLiveMethods():
    """Strict regression: every Editor adapter in STATIC_NODE_TYPES must align
    with its Schicht-2 Action signature.

    Phase 3 shipped the validator with a tracked drift snapshot
    (`_KNOWN_ADAPTER_DRIFTS`); Phase 4 cleaned the backlog so the snapshot is
    empty and we now demand zero errors. Any new drift fails immediately —
    fix the adapter or the action, never the assertion.

    History: wiki/c-work/4-done/2026-04-adapter-drift-cleanup.md
    """
    from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES

    instances = _instantiateLiveMethods()
    if not instances:
        pytest.skip("no methods could be instantiated in this test env")

    registry = _buildActionsRegistryFromMethods(instances)
    report = _validateAllAdapters(list(STATIC_NODE_TYPES), registry)

    # Guard: the snapshot itself must never silently regrow.
    assert _KNOWN_ADAPTER_DRIFTS == frozenset(), (
        "_KNOWN_ADAPTER_DRIFTS must stay empty after Phase-4 cleanup. "
        "Do not add new entries — fix the drift instead."
    )
    assert report.errors == [], (
        "Adapter↔Action drift detected:\n" + "\n".join(report.errors)
    )
|
||||||
170
tests/unit/graphicalEditor/test_node_adapter.py
Normal file
170
tests/unit/graphicalEditor/test_node_adapter.py
Normal file
|
|
@ -0,0 +1,170 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Tests for the Schicht-3 NodeAdapter projection (Phase 3).
|
||||||
|
|
||||||
|
Covers the pure projection helpers in nodeAdapter.py:
|
||||||
|
- identifying method-bound vs framework-primitive nodes
|
||||||
|
- extracting bindsAction
|
||||||
|
- building UserParamMapping from legacy parameter dicts
|
||||||
|
- converting inputPorts dict-of-dicts into per-port accepts lists
|
||||||
|
- end-to-end legacy-node → NodeAdapter projection
|
||||||
|
|
||||||
|
These tests do NOT touch live methods; they verify the projection logic
|
||||||
|
in isolation so it is robust before the adapterValidator composes with it.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.nodeAdapter import (
|
||||||
|
NodeAdapter,
|
||||||
|
UserParamMapping,
|
||||||
|
_adapterFromLegacyNode,
|
||||||
|
_bindsActionFromLegacy,
|
||||||
|
_extractVisibleWhen,
|
||||||
|
_isMethodBoundNode,
|
||||||
|
_projectAllAdapters,
|
||||||
|
_projectInputAccepts,
|
||||||
|
_userParamFromLegacyParam,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _legacyMethodNode(**overrides):
|
||||||
|
base = {
|
||||||
|
"id": "trustee.processDocuments",
|
||||||
|
"category": "trustee",
|
||||||
|
"label": "Verarbeiten",
|
||||||
|
"description": "...",
|
||||||
|
"parameters": [
|
||||||
|
{"name": "documentList", "type": "DocumentList", "required": True,
|
||||||
|
"frontendType": "dataRef", "description": "Eingabe"},
|
||||||
|
{"name": "featureInstanceId", "type": "FeatureInstanceRef", "required": True,
|
||||||
|
"frontendType": "hidden", "description": "Trustee-Instanz"},
|
||||||
|
],
|
||||||
|
"inputs": 1,
|
||||||
|
"outputs": 1,
|
||||||
|
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
|
||||||
|
"outputPorts": {0: {"schema": "TrusteeProcessResult"}},
|
||||||
|
"meta": {"icon": "mdi-x", "color": "#000", "usesAi": False},
|
||||||
|
"_method": "trustee",
|
||||||
|
"_action": "processDocuments",
|
||||||
|
}
|
||||||
|
base.update(overrides)
|
||||||
|
return base
|
||||||
|
|
||||||
|
|
||||||
|
def _primitiveNode(**overrides):
|
||||||
|
base = {
|
||||||
|
"id": "flow.loop",
|
||||||
|
"category": "flow",
|
||||||
|
"label": "Schleife",
|
||||||
|
"parameters": [{"name": "items", "type": "string", "required": True}],
|
||||||
|
"inputs": 1,
|
||||||
|
"outputs": 1,
|
||||||
|
"inputPorts": {0: {"accepts": ["Transit"]}},
|
||||||
|
"executor": "flow",
|
||||||
|
}
|
||||||
|
base.update(overrides)
|
||||||
|
return base
|
||||||
|
|
||||||
|
|
||||||
|
class TestIsMethodBound:
    # A node is method-bound only when BOTH _method and _action are present.

    def test_methodBoundIsTrue(self):
        assert _isMethodBoundNode(_legacyMethodNode()) is True

    def test_primitiveIsFalse(self):
        assert _isMethodBoundNode(_primitiveNode()) is False

    @pytest.mark.parametrize("partial", [
        {"_method": "trustee"},  # missing _action
        {"_action": "processDocuments"},  # missing _method
        {},
    ])
    def test_partialBindingIsFalse(self, partial):
        node = _primitiveNode(**partial)
        assert _isMethodBoundNode(node) is False
|
||||||
|
|
||||||
|
|
||||||
|
class TestBindsActionFromLegacy:
    # bindsAction is the canonical "<method>.<action>" FQN, or None when
    # the node carries no method binding.

    def test_returnsCanonicalFqn(self):
        assert _bindsActionFromLegacy(_legacyMethodNode()) == "trustee.processDocuments"

    def test_returnsNoneForPrimitive(self):
        assert _bindsActionFromLegacy(_primitiveNode()) is None
|
||||||
|
|
||||||
|
|
||||||
|
class TestUserParamFromLegacy:
    # Projection of one legacy parameter dict into a UserParamMapping.

    def test_carriesEditorOverridesOnly(self):
        legacy = {"name": "documentList", "type": "DocumentList", "required": True,
                  "frontendType": "dataRef", "description": "Eingabe", "default": []}
        mapping = _userParamFromLegacyParam(legacy)
        assert isinstance(mapping, UserParamMapping)
        assert mapping.actionArg == "documentList"
        assert mapping.uiHint == "dataRef"
        assert mapping.description == "Eingabe"
        assert mapping.defaultValue == []
        assert mapping.frontendOptions is None

    def test_extractsConditionalVisibility(self):
        # frontendOptions.dependsOn/showWhen become a visibleWhen rule.
        legacy = {
            "name": "filterJson",
            "type": "string",
            "frontendType": "textarea",
            "frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]},
        }
        mapping = _userParamFromLegacyParam(legacy)
        assert mapping.visibleWhen == {"actionArg": "mode", "in": ["raw", "aggregate"]}
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtractVisibleWhen:
    # _extractVisibleWhen yields a rule only when both dependsOn and
    # showWhen are present in the hint.

    def test_returnsNoneForMissingHint(self):
        assert _extractVisibleWhen(None) is None
        assert _extractVisibleWhen({}) is None
        assert _extractVisibleWhen({"dependsOn": "x"}) is None

    def test_normalizesScalarShowWhen(self):
        # A scalar showWhen is wrapped into a one-element list.
        out = _extractVisibleWhen({"dependsOn": "entity", "showWhen": "tenant"})
        assert out == {"actionArg": "entity", "in": ["tenant"]}
|
||||||
|
|
||||||
|
|
||||||
|
class TestProjectInputAccepts:
    # inputPorts {index: {"accepts": [...]}} becomes a positional
    # list-of-lists, one entry per declared input.

    def test_perPortAcceptsList(self):
        node = _legacyMethodNode()
        assert _projectInputAccepts(node) == [["DocumentList", "Transit"]]

    def test_emptyForZeroInputs(self):
        node = _legacyMethodNode(inputs=0, inputPorts={})
        assert _projectInputAccepts(node) == []

    def test_handlesStringKeys(self):
        # String port keys (e.g. after a JSON round-trip) must still resolve.
        node = _legacyMethodNode(inputPorts={"0": {"accepts": ["Transit"]}})
        assert _projectInputAccepts(node) == [["Transit"]]

    def test_missingPortReturnsEmptyList(self):
        # Declared-but-undescribed ports map to an empty accepts list.
        node = _legacyMethodNode(inputs=2, inputPorts={0: {"accepts": ["Transit"]}})
        assert _projectInputAccepts(node) == [["Transit"], []]
|
||||||
|
|
||||||
|
|
||||||
|
class TestAdapterFromLegacyNode:
    # End-to-end legacy-node → NodeAdapter projection.

    def test_buildsAdapter(self):
        adapter = _adapterFromLegacyNode(_legacyMethodNode())
        assert isinstance(adapter, NodeAdapter)
        assert adapter.nodeId == "trustee.processDocuments"
        assert adapter.bindsAction == "trustee.processDocuments"
        assert adapter.category == "trustee"
        assert len(adapter.userParams) == 2
        assert adapter.userParams[0].actionArg == "documentList"
        assert adapter.inputAccepts == [["DocumentList", "Transit"]]
        assert adapter.contextParams == {}
        assert adapter.meta.get("icon") == "mdi-x"

    def test_returnsNoneForPrimitive(self):
        assert _adapterFromLegacyNode(_primitiveNode()) is None
|
||||||
|
|
||||||
|
|
||||||
|
class TestProjectAllAdapters:
|
||||||
|
def test_skipsPrimitives(self):
|
||||||
|
nodes = [_legacyMethodNode(), _primitiveNode()]
|
||||||
|
out = _projectAllAdapters(nodes)
|
||||||
|
assert list(out.keys()) == ["trustee.processDocuments"]
|
||||||
257
tests/unit/graphicalEditor/test_portTypes_catalog.py
Normal file
257
tests/unit/graphicalEditor/test_portTypes_catalog.py
Normal file
|
|
@ -0,0 +1,257 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Catalog integrity + new Phase-1 schemas
|
||||||
|
(see wiki/c-work/1-plan/2026-04-typed-action-architecture.md).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.portTypes import (
|
||||||
|
PORT_TYPE_CATALOG,
|
||||||
|
PRIMITIVE_TYPES,
|
||||||
|
PortField,
|
||||||
|
PortSchema,
|
||||||
|
_stripContainer,
|
||||||
|
_validateCatalog,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Validator behaviour
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_catalogIsHealthy():
|
||||||
|
"""The shipped catalog must validate without errors."""
|
||||||
|
errors = _validateCatalog()
|
||||||
|
assert errors == [], "Catalog has integrity errors:\n - " + "\n - ".join(errors)
|
||||||
|
|
||||||
|
|
||||||
|
def test_validatorDetectsUnknownType(monkeypatch):
|
||||||
|
"""Inject a bad schema and ensure it is reported."""
|
||||||
|
bad = PortSchema(name="_BadTest", fields=[
|
||||||
|
PortField(name="x", type="DoesNotExist"),
|
||||||
|
])
|
||||||
|
monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadTest", bad)
|
||||||
|
errors = _validateCatalog()
|
||||||
|
assert any("DoesNotExist" in e for e in errors)
|
||||||
|
|
||||||
|
|
||||||
|
def test_validatorDetectsBadDiscriminatorType(monkeypatch):
|
||||||
|
bad = PortSchema(name="_BadDisc", fields=[
|
||||||
|
PortField(name="x", type="int", discriminator=True),
|
||||||
|
])
|
||||||
|
monkeypatch.setitem(PORT_TYPE_CATALOG, "_BadDisc", bad)
|
||||||
|
errors = _validateCatalog()
|
||||||
|
assert any("discriminator must be 'str'" in e for e in errors)
|
||||||
|
|
||||||
|
|
||||||
|
def test_validatorDetectsMultipleDiscriminators(monkeypatch):
|
||||||
|
bad = PortSchema(name="_DoubleDisc", fields=[
|
||||||
|
PortField(name="a", type="str", discriminator=True),
|
||||||
|
PortField(name="b", type="str", discriminator=True),
|
||||||
|
])
|
||||||
|
monkeypatch.setitem(PORT_TYPE_CATALOG, "_DoubleDisc", bad)
|
||||||
|
errors = _validateCatalog()
|
||||||
|
assert any("max 1 allowed" in e for e in errors)
|
||||||
|
|
||||||
|
|
||||||
|
def test_validatorDetectsKeyNameMismatch(monkeypatch):
|
||||||
|
bad = PortSchema(name="DifferentName", fields=[
|
||||||
|
PortField(name="x", type="str"),
|
||||||
|
])
|
||||||
|
monkeypatch.setitem(PORT_TYPE_CATALOG, "_KeyMismatch", bad)
|
||||||
|
errors = _validateCatalog()
|
||||||
|
assert any("does not match schema.name" in e for e in errors)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _stripContainer helper
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("typeStr,expected", [
|
||||||
|
("str", ["str"]),
|
||||||
|
("int", ["int"]),
|
||||||
|
("ConnectionRef", ["ConnectionRef"]),
|
||||||
|
("List[Document]", ["Document"]),
|
||||||
|
("List[ProcessError]", ["ProcessError"]),
|
||||||
|
("Dict[str,Any]", ["str", "Any"]),
|
||||||
|
("Dict[str,int]", ["str", "int"]),
|
||||||
|
("", []),
|
||||||
|
])
|
||||||
|
def test_stripContainer(typeStr, expected):
|
||||||
|
assert _stripContainer(typeStr) == expected
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Phase-1 new Refs
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_featureInstanceRefHasDiscriminator():
|
||||||
|
s = PORT_TYPE_CATALOG["FeatureInstanceRef"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert names == {"id", "featureCode", "label", "mandateId"}
|
||||||
|
discriminators = [f for f in s.fields if f.discriminator]
|
||||||
|
assert len(discriminators) == 1
|
||||||
|
assert discriminators[0].name == "featureCode"
|
||||||
|
assert discriminators[0].type == "str"
|
||||||
|
|
||||||
|
|
||||||
|
def test_connectionRefAuthorityIsDiscriminator():
|
||||||
|
s = PORT_TYPE_CATALOG["ConnectionRef"]
|
||||||
|
discriminators = [f for f in s.fields if f.discriminator]
|
||||||
|
assert len(discriminators) == 1
|
||||||
|
assert discriminators[0].name == "authority"
|
||||||
|
|
||||||
|
|
||||||
|
def test_clickUpListRefExists():
|
||||||
|
s = PORT_TYPE_CATALOG["ClickUpListRef"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "listId" in names
|
||||||
|
assert "connection" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_promptTemplateRefExists():
|
||||||
|
s = PORT_TYPE_CATALOG["PromptTemplateRef"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "id" in names
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Phase-1 Trustee Result Schemas
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_trusteeRefreshResultStructure():
|
||||||
|
s = PORT_TYPE_CATALOG["TrusteeRefreshResult"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert {"syncCounts", "oldestBookingDate", "newestBookingDate",
|
||||||
|
"featureInstance", "errors"}.issubset(names)
|
||||||
|
|
||||||
|
|
||||||
|
def test_trusteeProcessResultExposesDocuments():
|
||||||
|
s = PORT_TYPE_CATALOG["TrusteeProcessResult"]
|
||||||
|
docField = next((f for f in s.fields if f.name == "documents"), None)
|
||||||
|
assert docField is not None
|
||||||
|
assert docField.type == "List[Document]"
|
||||||
|
assert docField.required is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_trusteeSyncResultHasJournalLines():
|
||||||
|
s = PORT_TYPE_CATALOG["TrusteeSyncResult"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "syncedCount" in names
|
||||||
|
assert "journalLines" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_journalLineHasAccountingFields():
|
||||||
|
s = PORT_TYPE_CATALOG["JournalLine"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
for required in ("bookingDate", "account", "amount"):
|
||||||
|
assert required in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_processErrorHasStageAndMessage():
|
||||||
|
s = PORT_TYPE_CATALOG["ProcessError"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert {"stage", "message"}.issubset(names)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Phase-1 Redmine Result Schemas
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_redmineTicketHasCoreFields():
|
||||||
|
s = PORT_TYPE_CATALOG["RedmineTicket"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
for required in ("id", "subject", "status"):
|
||||||
|
assert required in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_redmineTicketListReferencesTicket():
|
||||||
|
s = PORT_TYPE_CATALOG["RedmineTicketList"]
|
||||||
|
ticketsField = next((f for f in s.fields if f.name == "tickets"), None)
|
||||||
|
assert ticketsField is not None
|
||||||
|
assert ticketsField.type == "List[RedmineTicket]"
|
||||||
|
|
||||||
|
|
||||||
|
def test_redmineStatsExists():
|
||||||
|
s = PORT_TYPE_CATALOG["RedmineStats"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "kpis" in names
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Phase-1 Expressions / Misc
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_cronExpressionExists():
|
||||||
|
s = PORT_TYPE_CATALOG["CronExpression"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "expression" in names
|
||||||
|
assert "timezone" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_conditionExpressionHasSyntaxEnum():
|
||||||
|
s = PORT_TYPE_CATALOG["ConditionExpression"]
|
||||||
|
syntaxField = next((f for f in s.fields if f.name == "syntax"), None)
|
||||||
|
assert syntaxField is not None
|
||||||
|
assert syntaxField.enumValues
|
||||||
|
assert "jmespath" in syntaxField.enumValues
|
||||||
|
|
||||||
|
|
||||||
|
def test_attachmentSpecHasSourceEnum():
|
||||||
|
s = PORT_TYPE_CATALOG["AttachmentSpec"]
|
||||||
|
sourceField = next((f for f in s.fields if f.name == "source"), None)
|
||||||
|
assert sourceField is not None
|
||||||
|
assert set(sourceField.enumValues or []) == {"path", "document", "url"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_taskAttachmentRefExists():
|
||||||
|
s = PORT_TYPE_CATALOG["TaskAttachmentRef"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "taskId" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_dateTimeAndUrlSemanticPrimitivesExist():
|
||||||
|
assert "DateTime" in PORT_TYPE_CATALOG
|
||||||
|
assert "Url" in PORT_TYPE_CATALOG
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Cross-cutting: every Trustee/Redmine result references FeatureInstanceRef
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("schemaName", [
|
||||||
|
"TrusteeRefreshResult",
|
||||||
|
"TrusteeProcessResult",
|
||||||
|
"TrusteeSyncResult",
|
||||||
|
"RedmineTicket",
|
||||||
|
"RedmineTicketList",
|
||||||
|
"RedmineStats",
|
||||||
|
])
|
||||||
|
def test_resultSchemasReferenceFeatureInstance(schemaName):
|
||||||
|
s = PORT_TYPE_CATALOG[schemaName]
|
||||||
|
fiField = next((f for f in s.fields if f.name == "featureInstance"), None)
|
||||||
|
assert fiField is not None, f"{schemaName} should expose featureInstance for traceability"
|
||||||
|
assert fiField.type == "FeatureInstanceRef"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Serialization stays compatible (frontend reads model_dump output)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_portFieldDumpsDiscriminatorFlag():
|
||||||
|
f = PortField(name="x", type="str", discriminator=True)
|
||||||
|
dumped = f.model_dump()
|
||||||
|
assert dumped["discriminator"] is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_defaultDiscriminatorIsFalse():
|
||||||
|
f = PortField(name="x", type="str")
|
||||||
|
dumped = f.model_dump()
|
||||||
|
assert dumped["discriminator"] is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_primitiveTypesFrozenSet():
|
||||||
|
assert "str" in PRIMITIVE_TYPES
|
||||||
|
assert "Any" in PRIMITIVE_TYPES
|
||||||
|
assert "DoesNotExist" not in PRIMITIVE_TYPES
|
||||||
24
tests/unit/graphicalEditor/test_port_schema_recursive.py
Normal file
24
tests/unit/graphicalEditor/test_port_schema_recursive.py
Normal file
|
|
@ -0,0 +1,24 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""Port type catalog: nested provenance schemas (Typed Generic Handover)."""
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, _defaultForType
|
||||||
|
|
||||||
|
|
||||||
|
def test_connection_ref_in_catalog():
|
||||||
|
s = PORT_TYPE_CATALOG["ConnectionRef"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert names == {"id", "authority", "label"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_document_list_has_provenance_fields():
|
||||||
|
s = PORT_TYPE_CATALOG["DocumentList"]
|
||||||
|
names = {f.name for f in s.fields}
|
||||||
|
assert "documents" in names
|
||||||
|
assert "connection" in names
|
||||||
|
assert "source" in names
|
||||||
|
assert "count" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_default_for_nested_schema_type():
|
||||||
|
assert _defaultForType("ConnectionRef") == {}
|
||||||
|
assert _defaultForType("List[Document]") == []
|
||||||
|
|
@ -0,0 +1,67 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths
|
||||||
|
from modules.workflows.automation2.graphUtils import parse_graph_defined_schema, validateGraph
|
||||||
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
|
||||||
|
|
||||||
|
def test_compute_upstream_paths_includes_form_dynamic_fields():
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "form1",
|
||||||
|
"type": "input.form",
|
||||||
|
"parameters": {
|
||||||
|
"fields": [{"name": "custName", "type": "str", "label": "Name", "required": True}],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{"id": "ai1", "type": "ai.prompt", "parameters": {"aiPrompt": "hi"}},
|
||||||
|
],
|
||||||
|
"connections": [
|
||||||
|
{"source": "form1", "target": "ai1", "sourceOutput": 0, "targetInput": 0},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
paths = compute_upstream_paths(graph, "ai1")
|
||||||
|
labels = [p["label"] for p in paths if p.get("producerNodeId") == "form1"]
|
||||||
|
assert any("custName" in lbl for lbl in labels), labels
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_graph_defined_schema_fields():
|
||||||
|
node = {"parameters": {"fields": [{"name": "a", "type": "str", "label": "A", "required": False}]}}
|
||||||
|
sch = parse_graph_defined_schema(node, "fields")
|
||||||
|
assert sch and sch["name"] == "FormPayload_dynamic"
|
||||||
|
assert sch["fields"][0]["name"] == "a"
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_graph_defined_schema_nested_group():
|
||||||
|
node = {
|
||||||
|
"parameters": {
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "addr",
|
||||||
|
"type": "group",
|
||||||
|
"label": "Addr",
|
||||||
|
"fields": [{"name": "zip", "type": "str", "label": "ZIP"}],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sch = parse_graph_defined_schema(node, "fields")
|
||||||
|
names = [f["name"] for f in sch["fields"]]
|
||||||
|
assert "addr.zip" in names
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_graph_port_mismatch_errors():
|
||||||
|
node_type_ids = {n["id"] for n in STATIC_NODE_TYPES}
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{"id": "t1", "type": "trigger.manual", "parameters": {}},
|
||||||
|
{"id": "e1", "type": "email.checkEmail", "parameters": {"connectionReference": "x"}},
|
||||||
|
{"id": "a1", "type": "ai.prompt", "parameters": {"aiPrompt": "summarize"}},
|
||||||
|
],
|
||||||
|
"connections": [
|
||||||
|
{"source": "t1", "target": "e1", "sourceOutput": 0, "targetInput": 0},
|
||||||
|
{"source": "e1", "target": "a1", "sourceOutput": 0, "targetInput": 0},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
errors = validateGraph(graph, node_type_ids)
|
||||||
|
assert any("Port mismatch" in e for e in errors), errors
|
||||||
0
tests/unit/methods/__init__.py
Normal file
0
tests/unit/methods/__init__.py
Normal file
289
tests/unit/methods/test_action_signature_validator.py
Normal file
289
tests/unit/methods/test_action_signature_validator.py
Normal file
|
|
@ -0,0 +1,289 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Tests for the action-signature validator (Phase 2 of the Typed Action
|
||||||
|
Architecture, see wiki/c-work/1-plan/2026-04-typed-action-architecture.md).
|
||||||
|
|
||||||
|
Two parts:
|
||||||
|
A) Unit tests for the validator itself (positive + negative cases)
|
||||||
|
B) Healthy-state test: every Method discovered by methodDiscovery passes
|
||||||
|
validation. This is the regression net that catches drift between an
|
||||||
|
action's declared types and the type catalog.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.datamodels.datamodelWorkflowActions import (
|
||||||
|
WorkflowActionDefinition,
|
||||||
|
WorkflowActionParameter,
|
||||||
|
)
|
||||||
|
from modules.shared.frontendTypes import FrontendType
|
||||||
|
from modules.workflows.methods._actionSignatureValidator import (
|
||||||
|
_formatValidationReport,
|
||||||
|
_validateActionDefinition,
|
||||||
|
_validateActionParameter,
|
||||||
|
_validateActionsDict,
|
||||||
|
_validateMethods,
|
||||||
|
_validateTypeRef,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _makeParam(typeStr: str, **kwargs) -> WorkflowActionParameter:
|
||||||
|
defaults = {
|
||||||
|
"name": "p",
|
||||||
|
"type": typeStr,
|
||||||
|
"frontendType": FrontendType.TEXT,
|
||||||
|
"required": False,
|
||||||
|
"description": "",
|
||||||
|
}
|
||||||
|
defaults.update(kwargs)
|
||||||
|
return WorkflowActionParameter(**defaults)
|
||||||
|
|
||||||
|
|
||||||
|
def _makeAction(
|
||||||
|
actionId: str = "test.x",
|
||||||
|
parameters: dict | None = None,
|
||||||
|
outputType: str = "ActionResult",
|
||||||
|
) -> WorkflowActionDefinition:
|
||||||
|
return WorkflowActionDefinition(
|
||||||
|
actionId=actionId,
|
||||||
|
description="t",
|
||||||
|
parameters=parameters or {},
|
||||||
|
outputType=outputType,
|
||||||
|
execute=lambda *a, **k: None,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# A) Unit tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestValidateTypeRef:
|
||||||
|
"""Single-type validation."""
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("t", [
|
||||||
|
"str", "int", "bool", "float", "Any",
|
||||||
|
"ConnectionRef", "FeatureInstanceRef", "DocumentList",
|
||||||
|
"TrusteeProcessResult", "RedmineTicket",
|
||||||
|
"List[str]", "List[int]", "List[Any]",
|
||||||
|
"Dict[str,Any]", "Dict[str,Document]",
|
||||||
|
"List[FeatureInstanceRef]",
|
||||||
|
])
|
||||||
|
def test_validTypes(self, t):
|
||||||
|
assert _validateTypeRef(t) == []
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("t", [
|
||||||
|
"list", # too generic
|
||||||
|
"dict", # too generic
|
||||||
|
"Foobar", # unknown schema
|
||||||
|
"List[Foo]", # unknown inner
|
||||||
|
"Dict[str,Foo]", # unknown inner value
|
||||||
|
"", # empty
|
||||||
|
])
|
||||||
|
def test_invalidTypes(self, t):
|
||||||
|
errors = _validateTypeRef(t)
|
||||||
|
assert errors, f"expected validation errors for {t!r}"
|
||||||
|
|
||||||
|
|
||||||
|
class TestValidateActionParameter:
|
||||||
|
def test_validParam(self):
|
||||||
|
p = _makeParam("ConnectionRef")
|
||||||
|
assert _validateActionParameter("ai.x", "p", p) == []
|
||||||
|
|
||||||
|
def test_invalidParam(self):
|
||||||
|
p = _makeParam("Foobar")
|
||||||
|
errors = _validateActionParameter("ai.x", "myParam", p)
|
||||||
|
assert errors and errors[0].startswith("ai.x.myParam:")
|
||||||
|
|
||||||
|
|
||||||
|
class TestValidateActionDefinition:
|
||||||
|
def test_valid(self):
|
||||||
|
action = _makeAction(
|
||||||
|
parameters={"a": _makeParam("ConnectionRef", name="a")},
|
||||||
|
outputType="DocumentList",
|
||||||
|
)
|
||||||
|
assert _validateActionDefinition(action) == []
|
||||||
|
|
||||||
|
def test_invalidOutputType(self):
|
||||||
|
action = _makeAction(outputType="DoesNotExist")
|
||||||
|
errors = _validateActionDefinition(action)
|
||||||
|
assert any("<outputType>" in e for e in errors)
|
||||||
|
|
||||||
|
def test_genericOutputAllowed(self):
|
||||||
|
# ActionResult and Transit are allowed as fire-and-forget outputs
|
||||||
|
for t in ("ActionResult", "Transit"):
|
||||||
|
assert _validateActionDefinition(_makeAction(outputType=t)) == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestValidateActionsDict:
|
||||||
|
def test_emptyDictOk(self):
|
||||||
|
assert _validateActionsDict("m", {}) == []
|
||||||
|
|
||||||
|
def test_nonActionDefinitionRejected(self):
|
||||||
|
errors = _validateActionsDict("m", {"x": "not an action"})
|
||||||
|
assert any("not a WorkflowActionDefinition" in e for e in errors)
|
||||||
|
|
||||||
|
def test_collectsErrorsAcrossActions(self):
|
||||||
|
actions = {
|
||||||
|
"good": _makeAction(
|
||||||
|
parameters={"a": _makeParam("str", name="a")},
|
||||||
|
outputType="DocumentList",
|
||||||
|
),
|
||||||
|
"bad": _makeAction(
|
||||||
|
actionId="m.bad",
|
||||||
|
parameters={"x": _makeParam("Foobar", name="x")},
|
||||||
|
outputType="AlsoUnknown",
|
||||||
|
),
|
||||||
|
}
|
||||||
|
errors = _validateActionsDict("m", actions)
|
||||||
|
# bad action contributes 2 errors, good contributes 0
|
||||||
|
assert len(errors) == 2
|
||||||
|
|
||||||
|
|
||||||
|
class TestValidateMethods:
|
||||||
|
def test_emptyOk(self):
|
||||||
|
assert _validateMethods([]) == []
|
||||||
|
|
||||||
|
def test_methodLikeObject(self):
|
||||||
|
class FakeMethod:
|
||||||
|
name = "fake"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self._actions = {
|
||||||
|
"a": _makeAction(
|
||||||
|
parameters={"p": _makeParam("ConnectionRef", name="p")},
|
||||||
|
outputType="DocumentList",
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
assert _validateMethods([FakeMethod()]) == []
|
||||||
|
|
||||||
|
def test_methodWithDrift(self):
|
||||||
|
class FakeMethod:
|
||||||
|
name = "fake"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self._actions = {
|
||||||
|
"broken": _makeAction(
|
||||||
|
actionId="fake.broken",
|
||||||
|
parameters={"p": _makeParam("Unknown", name="p")},
|
||||||
|
outputType="ActionResult",
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
errors = _validateMethods([FakeMethod()])
|
||||||
|
assert errors and "fake.broken.p" in errors[0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatValidationReport:
|
||||||
|
def test_healthyMessage(self):
|
||||||
|
assert "healthy" in _formatValidationReport([]).lower()
|
||||||
|
|
||||||
|
def test_errorReport(self):
|
||||||
|
msg = _formatValidationReport(["a.x: bad", "b.y: also bad"])
|
||||||
|
assert "Found 2 action-signature drift" in msg
|
||||||
|
assert "a.x: bad" in msg
|
||||||
|
assert "b.y: also bad" in msg
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# B) Healthy-state test for the real Method registry
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class _NullRbac:
|
||||||
|
"""Minimal RBAC stub so MethodBase.__init__ does not crash."""
|
||||||
|
|
||||||
|
def getUserPermissions(self, **kwargs): # noqa: D401
|
||||||
|
class _P:
|
||||||
|
view = True
|
||||||
|
read = True
|
||||||
|
create = True
|
||||||
|
update = True
|
||||||
|
delete = True
|
||||||
|
return _P()
|
||||||
|
|
||||||
|
|
||||||
|
class _StubServices:
|
||||||
|
"""Minimal services container required by MethodBase.__init__."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.rbac = _NullRbac()
|
||||||
|
self.user = type("U", (), {"id": "test-user", "roleLabels": []})()
|
||||||
|
self.mandateId = None
|
||||||
|
self.featureInstanceId = None
|
||||||
|
|
||||||
|
|
||||||
|
def _ensureOptionalDeps():
|
||||||
|
"""Patch sys.modules with stubs for optional deps that some Methods
|
||||||
|
import at module-load time but that the test env might not have.
|
||||||
|
|
||||||
|
This is purely so the validator can inspect the action signatures —
|
||||||
|
no real network calls happen in these tests.
|
||||||
|
"""
|
||||||
|
import sys
|
||||||
|
import types
|
||||||
|
|
||||||
|
class _AnyAttrModule(types.ModuleType):
|
||||||
|
"""Module stub that lazily creates dummy classes for any attribute,
|
||||||
|
so type annotations like `aiohttp.ClientSession` resolve."""
|
||||||
|
|
||||||
|
def __getattr__(self, name): # noqa: D401
|
||||||
|
return type(name, (), {})
|
||||||
|
|
||||||
|
for name in ("aiohttp",):
|
||||||
|
if name not in sys.modules:
|
||||||
|
sys.modules[name] = _AnyAttrModule(name)
|
||||||
|
|
||||||
|
|
||||||
|
def _instantiateMethod(methodCls):
|
||||||
|
"""Try to instantiate a Method with a stub services object.
|
||||||
|
|
||||||
|
Some Methods do extra work in __init__ (e.g. helper imports). We
|
||||||
|
accept failures and return None; missing Methods are skipped.
|
||||||
|
"""
|
||||||
|
_ensureOptionalDeps()
|
||||||
|
try:
|
||||||
|
return methodCls(_StubServices())
|
||||||
|
except Exception as exc: # pragma: no cover - environment dependent
|
||||||
|
pytest.skip(f"could not instantiate {methodCls.__name__}: {exc}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("modulePath,className", [
|
||||||
|
("modules.workflows.methods.methodTrustee.methodTrustee", "MethodTrustee"),
|
||||||
|
("modules.workflows.methods.methodRedmine.methodRedmine", "MethodRedmine"),
|
||||||
|
("modules.workflows.methods.methodSharepoint.methodSharepoint", "MethodSharepoint"),
|
||||||
|
("modules.workflows.methods.methodOutlook.methodOutlook", "MethodOutlook"),
|
||||||
|
("modules.workflows.methods.methodAi.methodAi", "MethodAi"),
|
||||||
|
("modules.workflows.methods.methodClickup.methodClickup", "MethodClickup"),
|
||||||
|
("modules.workflows.methods.methodFile.methodFile", "MethodFile"),
|
||||||
|
("modules.workflows.methods.methodContext.methodContext", "MethodContext"),
|
||||||
|
("modules.workflows.methods.methodJira.methodJira", "MethodJira"),
|
||||||
|
("modules.workflows.methods.methodChatbot.methodChatbot", "MethodChatbot"),
|
||||||
|
])
|
||||||
|
def test_methodSignaturesAreHealthy(modulePath, className):
|
||||||
|
"""Each shipping Method's _actions must validate against the catalog."""
|
||||||
|
import importlib
|
||||||
|
|
||||||
|
try:
|
||||||
|
module = importlib.import_module(modulePath)
|
||||||
|
except ImportError as exc:
|
||||||
|
pytest.skip(f"module not importable: {exc}")
|
||||||
|
return
|
||||||
|
|
||||||
|
cls = getattr(module, className, None)
|
||||||
|
if cls is None:
|
||||||
|
pytest.skip(f"{className} not found in {modulePath}")
|
||||||
|
return
|
||||||
|
|
||||||
|
instance = _instantiateMethod(cls)
|
||||||
|
if instance is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
errors = _validateMethods([instance])
|
||||||
|
assert errors == [], _formatValidationReport(errors)
|
||||||
188
tests/unit/nodeDefinitions/test_trustee_schema_compliance.py
Normal file
188
tests/unit/nodeDefinitions/test_trustee_schema_compliance.py
Normal file
|
|
@ -0,0 +1,188 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""Trustee node schema-compliance under the Pick-not-Push typed port system.
|
||||||
|
|
||||||
|
Verifies that:
|
||||||
|
- All three trustee actions (extractFromFiles, processDocuments,
|
||||||
|
syncToAccounting) declare ``ActionResult`` as output, matching what the
|
||||||
|
Python implementations actually return at runtime
|
||||||
|
(``ActionResult.isSuccess(documents=[...])``).
|
||||||
|
- processDocuments / syncToAccounting accept ``ActionResult`` (the producer
|
||||||
|
schema) plus ``DocumentList`` and ``Transit`` for back-compat.
|
||||||
|
- The ``documentList`` parameter is required, typed ``List[ActionDocument]``
|
||||||
|
(the concrete shape consumed by ``_resolveDocumentList``) and rendered via
|
||||||
|
the dataRef picker so the user can bind it to ``upstream → documents``.
|
||||||
|
- The end-to-end Trustee pipeline graph (extract -> process -> sync) passes
|
||||||
|
hard port-compat validation (validateGraph).
|
||||||
|
- actionNodeExecutor produces canonical ``documents`` field — no legacy
|
||||||
|
``documentList`` alias — so that DataRef path=['documents'] is the single
|
||||||
|
source of truth.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
|
||||||
|
from modules.workflows.automation2.executors import actionNodeExecutor as _actionExec
|
||||||
|
from modules.workflows.automation2.graphUtils import validateGraph
|
||||||
|
|
||||||
|
|
||||||
|
def _node(nodeId: str) -> dict:
|
||||||
|
return next(n for n in STATIC_NODE_TYPES if n["id"] == nodeId)
|
||||||
|
|
||||||
|
|
||||||
|
def test_extractFromFiles_outputs_ActionResult():
|
||||||
|
"""Runtime returns ActionResult.isSuccess(documents=[...]) — see
|
||||||
|
actions/extractFromFiles.py. The adapter must declare the same."""
|
||||||
|
n = _node("trustee.extractFromFiles")
|
||||||
|
assert n["outputPorts"][0]["schema"] == "ActionResult"
|
||||||
|
|
||||||
|
|
||||||
|
def test_processDocuments_outputs_ActionResult():
|
||||||
|
n = _node("trustee.processDocuments")
|
||||||
|
assert n["outputPorts"][0]["schema"] == "ActionResult"
|
||||||
|
|
||||||
|
|
||||||
|
def test_syncToAccounting_outputs_ActionResult():
|
||||||
|
n = _node("trustee.syncToAccounting")
|
||||||
|
assert n["outputPorts"][0]["schema"] == "ActionResult"
|
||||||
|
|
||||||
|
|
||||||
|
def test_processDocuments_accepts_ActionResult_and_legacy():
|
||||||
|
"""processDocuments must accept ActionResult (the new producer schema for
|
||||||
|
extractFromFiles) plus DocumentList / Transit for back-compat."""
|
||||||
|
n = _node("trustee.processDocuments")
|
||||||
|
accepts = n["inputPorts"][0]["accepts"]
|
||||||
|
assert "ActionResult" in accepts
|
||||||
|
assert "DocumentList" in accepts
|
||||||
|
assert "Transit" in accepts
|
||||||
|
assert "UdmDocument" not in accepts, (
|
||||||
|
"UdmDocument was dropped from accepts during the Pick-not-Push schema cleanup."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_syncToAccounting_accepts_ActionResult_and_legacy():
|
||||||
|
n = _node("trustee.syncToAccounting")
|
||||||
|
accepts = n["inputPorts"][0]["accepts"]
|
||||||
|
assert "ActionResult" in accepts
|
||||||
|
assert "DocumentList" in accepts
|
||||||
|
assert "Transit" in accepts
|
||||||
|
|
||||||
|
|
||||||
|
def test_processDocuments_documentList_param_typed_required_dataRef():
|
||||||
|
"""documentList is a Pick-not-Push DataRef parameter — must be visible
|
||||||
|
and typed exactly like the producer field (``ActionResult.documents`` is
|
||||||
|
``List[ActionDocument]``) so DataPicker's strict-filter accepts it.
|
||||||
|
"""
|
||||||
|
params = {p["name"]: p for p in _node("trustee.processDocuments")["parameters"]}
|
||||||
|
p = params["documentList"]
|
||||||
|
assert p["type"] == "List[ActionDocument]", (
|
||||||
|
"documentList must declare the concrete producer type so the DataPicker "
|
||||||
|
"strict-filter resolves upstream ActionResult.documents as compatible."
|
||||||
|
)
|
||||||
|
assert p["required"] is True
|
||||||
|
assert p["frontendType"] == "dataRef", (
|
||||||
|
"documentList must use the dataRef renderer so the binding is visible"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_syncToAccounting_documentList_param_typed_required_dataRef():
|
||||||
|
params = {p["name"]: p for p in _node("trustee.syncToAccounting")["parameters"]}
|
||||||
|
p = params["documentList"]
|
||||||
|
assert p["type"] == "List[ActionDocument]", (
|
||||||
|
"documentList must declare the concrete producer type so the DataPicker "
|
||||||
|
"strict-filter resolves upstream ActionResult.documents as compatible."
|
||||||
|
)
|
||||||
|
assert p["required"] is True
|
||||||
|
assert p["frontendType"] == "dataRef", (
|
||||||
|
"documentList must use the dataRef renderer so the binding is visible"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_trustee_pipeline_graph_passes_hard_port_validation():
|
||||||
|
"""End-to-end pipeline: trigger.manual -> extract -> process -> sync.
|
||||||
|
|
||||||
|
Mirrors what frontend_nyla/.../trusteePipelineGraph.ts builds for
|
||||||
|
_buildScanUploadGraph. Port-compat must hold without warnings.
|
||||||
|
"""
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{"id": "trigger-manual", "type": "trigger.manual", "parameters": {}},
|
||||||
|
{
|
||||||
|
"id": "extract",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {
|
||||||
|
"fileIds": ["f1"],
|
||||||
|
"featureInstanceId": "inst-1",
|
||||||
|
"prompt": "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "process",
|
||||||
|
"type": "trustee.processDocuments",
|
||||||
|
"parameters": {
|
||||||
|
"documentList": {"type": "ref", "nodeId": "extract", "path": ["documents"]},
|
||||||
|
"featureInstanceId": "inst-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "sync",
|
||||||
|
"type": "trustee.syncToAccounting",
|
||||||
|
"parameters": {
|
||||||
|
"documentList": {"type": "ref", "nodeId": "process", "path": ["documents"]},
|
||||||
|
"featureInstanceId": "inst-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"connections": [
|
||||||
|
{"source": "trigger-manual", "sourceOutput": 0, "target": "extract", "targetInput": 0},
|
||||||
|
{"source": "extract", "sourceOutput": 0, "target": "process", "targetInput": 0},
|
||||||
|
{"source": "process", "sourceOutput": 0, "target": "sync", "targetInput": 0},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
nodeTypeIds = {n["id"] for n in STATIC_NODE_TYPES}
|
||||||
|
errors = validateGraph(graph, nodeTypeIds)
|
||||||
|
portMismatches = [e for e in errors if "Port mismatch" in e]
|
||||||
|
assert not portMismatches, f"Trustee pipeline must be port-compatible: {portMismatches}"
|
||||||
|
|
||||||
|
|
||||||
|
def test_catalog_ActionResult_exposes_documents_field():
|
||||||
|
"""Without ``documents`` on the ActionResult schema the DataPicker cannot
|
||||||
|
surface the canonical list-of-documents path that every downstream node
|
||||||
|
(processDocuments, syncToAccounting, AI consumers, ...) needs to bind to.
|
||||||
|
"""
|
||||||
|
schema = PORT_TYPE_CATALOG.get("ActionResult")
|
||||||
|
assert schema is not None
|
||||||
|
fieldNames = {f.name for f in schema.fields}
|
||||||
|
assert "documents" in fieldNames, (
|
||||||
|
"ActionResult.documents must be in PORT_TYPE_CATALOG so the frontend "
|
||||||
|
"DataPicker can offer it as a bindable path."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_catalog_ActionDocument_is_registered():
|
||||||
|
"""ActionResult.documents is List[ActionDocument]; the inner schema must
|
||||||
|
be registered so the picker can drill down to ``documents → * → documentName``.
|
||||||
|
"""
|
||||||
|
schema = PORT_TYPE_CATALOG.get("ActionDocument")
|
||||||
|
assert schema is not None
|
||||||
|
fieldNames = {f.name for f in schema.fields}
|
||||||
|
assert {"documentName", "documentData", "mimeType"}.issubset(fieldNames), (
|
||||||
|
"ActionDocument schema must mirror datamodelChat.ActionDocument."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_actionNodeExecutor_does_not_emit_legacy_documentList_alias():
|
||||||
|
"""Source-code assertion: out dict in execute() must not write documentList alias.
|
||||||
|
|
||||||
|
Pick-not-Push canonicalises on ``documents``. Removing the alias prevents
|
||||||
|
DataRefs from drifting back to the legacy field name.
|
||||||
|
"""
|
||||||
|
src = inspect.getsource(_actionExec)
|
||||||
|
assert '"documentList": docsList' not in src, (
|
||||||
|
"Legacy alias ``documentList`` must be removed from actionNodeExecutor "
|
||||||
|
"out-dict (use canonical ``documents`` only — see issues.md "
|
||||||
|
"'Trustee Schema-Compliance')."
|
||||||
|
)
|
||||||
|
assert '"documents": docsList' in src, (
|
||||||
|
"Canonical ``documents`` field missing from actionNodeExecutor out-dict."
|
||||||
|
)
|
||||||
2
tests/unit/scripts/__init__.py
Normal file
2
tests/unit/scripts/__init__.py
Normal file
|
|
@ -0,0 +1,2 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
289
tests/unit/scripts/test_migrate_feature_instance_refs.py
Normal file
289
tests/unit/scripts/test_migrate_feature_instance_refs.py
Normal file
|
|
@ -0,0 +1,289 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Tests for ``scripts/script_migrate_feature_instance_refs.py``.
|
||||||
|
|
||||||
|
The script touches the live ``poweron_graphicaleditor`` DB. Tests run against
|
||||||
|
an in-memory fake psycopg2 connection so we exercise the full code path
|
||||||
|
(SELECT -> migrate -> UPDATE) without requiring a real Postgres server.
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Tuple
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
_gatewayPath = Path(__file__).resolve().parents[3]
|
||||||
|
_scriptsPath = _gatewayPath / "scripts"
|
||||||
|
if str(_scriptsPath) not in sys.path:
|
||||||
|
sys.path.insert(0, str(_scriptsPath))
|
||||||
|
|
||||||
|
migrationModule = importlib.import_module("script_migrate_feature_instance_refs")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fake psycopg2 connection / cursor
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class _FakeCursor:
|
||||||
|
"""Mimics enough of psycopg2's RealDictCursor + plain cursor for the script."""
|
||||||
|
|
||||||
|
def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]], updates: List[Tuple[str, str, Any]]):
|
||||||
|
self._rowsByTable = rowsByTable
|
||||||
|
self._updates = updates
|
||||||
|
self._lastFetch: List[Dict[str, Any]] = []
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc, tb):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def execute(self, query: str, params: Any = None):
|
||||||
|
if query.strip().upper().startswith("SELECT"):
|
||||||
|
for table, rows in self._rowsByTable.items():
|
||||||
|
if table in query:
|
||||||
|
self._lastFetch = list(rows)
|
||||||
|
return
|
||||||
|
self._lastFetch = []
|
||||||
|
return
|
||||||
|
if query.strip().upper().startswith("UPDATE"):
|
||||||
|
for table in self._rowsByTable:
|
||||||
|
if table in query:
|
||||||
|
graphValue, pk = params
|
||||||
|
if hasattr(graphValue, "adapted"):
|
||||||
|
graphValue = graphValue.adapted
|
||||||
|
self._updates.append((table, pk, graphValue))
|
||||||
|
return
|
||||||
|
return
|
||||||
|
|
||||||
|
def fetchall(self):
|
||||||
|
return self._lastFetch
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeConn:
|
||||||
|
def __init__(self, rowsByTable: Dict[str, List[Dict[str, Any]]]):
|
||||||
|
self._rowsByTable = rowsByTable
|
||||||
|
self.updates: List[Tuple[str, str, Any]] = []
|
||||||
|
self.committed = False
|
||||||
|
self.closed = False
|
||||||
|
|
||||||
|
def cursor(self, cursor_factory: Any = None):
|
||||||
|
return _FakeCursor(self._rowsByTable, self.updates)
|
||||||
|
|
||||||
|
def commit(self):
|
||||||
|
self.committed = True
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.closed = True
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def graphsByTable() -> Dict[str, List[Dict[str, Any]]]:
|
||||||
|
return {
|
||||||
|
'"Automation2Workflow"': [
|
||||||
|
{
|
||||||
|
"pk": "wf-legacy",
|
||||||
|
"graph": {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n1",
|
||||||
|
"type": "trustee.processDocuments",
|
||||||
|
"parameters": {"featureInstanceId": "11111111-1111-1111-1111-111111111111"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "n2",
|
||||||
|
"type": "redmine.createIssue",
|
||||||
|
"parameters": {"featureInstanceId": "22222222-2222-2222-2222-222222222222"},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"connections": [],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pk": "wf-already-typed",
|
||||||
|
"graph": {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n1",
|
||||||
|
"type": "trustee.processDocuments",
|
||||||
|
"parameters": {
|
||||||
|
"featureInstanceId": {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": "33333333-3333-3333-3333-333333333333",
|
||||||
|
"featureCode": "trustee",
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"connections": [],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pk": "wf-empty-graph",
|
||||||
|
"graph": {},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pk": "wf-graph-as-string",
|
||||||
|
"graph": json.dumps({
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n1",
|
||||||
|
"type": "outlook.sendMail",
|
||||||
|
"parameters": {"featureInstanceId": "44444444-4444-4444-4444-444444444444"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"connections": [],
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'"AutoVersion"': [
|
||||||
|
{
|
||||||
|
"pk": "ver-legacy",
|
||||||
|
"graph": {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n1",
|
||||||
|
"type": "ai.runPrompt",
|
||||||
|
"parameters": {"featureInstanceId": "55555555-5555-5555-5555-555555555555"},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"connections": [],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helper-level tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestLoadGraph:
|
||||||
|
def test_dictPassesThrough(self):
|
||||||
|
assert migrationModule._loadGraph({"a": 1}) == {"a": 1}
|
||||||
|
|
||||||
|
def test_jsonStringIsParsed(self):
|
||||||
|
assert migrationModule._loadGraph('{"a": 2}') == {"a": 2}
|
||||||
|
|
||||||
|
def test_emptyOrInvalidYieldsEmptyDict(self):
|
||||||
|
assert migrationModule._loadGraph(None) == {}
|
||||||
|
assert migrationModule._loadGraph("") == {}
|
||||||
|
assert migrationModule._loadGraph("not json") == {}
|
||||||
|
|
||||||
|
def test_bytesStringIsParsed(self):
|
||||||
|
assert migrationModule._loadGraph(b'{"a": 3}') == {"a": 3}
|
||||||
|
|
||||||
|
|
||||||
|
class TestCountMigrations:
|
||||||
|
def test_zeroWhenIdentical(self):
|
||||||
|
g = {"nodes": [{"id": "n", "parameters": {"featureInstanceId": "uuid"}}]}
|
||||||
|
assert migrationModule._countMigrations(g, g) == 0
|
||||||
|
|
||||||
|
def test_countsMigratedFields(self):
|
||||||
|
before = {
|
||||||
|
"nodes": [
|
||||||
|
{"id": "n1", "parameters": {"featureInstanceId": "u1"}},
|
||||||
|
{"id": "n2", "parameters": {"featureInstanceId": "u2"}},
|
||||||
|
{"id": "n3", "parameters": {"featureInstanceId": "u3"}},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
after = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n1",
|
||||||
|
"parameters": {
|
||||||
|
"featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u1"}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{"id": "n2", "parameters": {"featureInstanceId": "u2"}},
|
||||||
|
{
|
||||||
|
"id": "n3",
|
||||||
|
"parameters": {
|
||||||
|
"featureInstanceId": {"$type": "FeatureInstanceRef", "id": "u3"}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
assert migrationModule._countMigrations(before, after) == 2
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# End-to-end migrate() tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestMigrate:
|
||||||
|
def test_dryRunDoesNotWriteOrCommit(self, monkeypatch, graphsByTable):
|
||||||
|
conn = _FakeConn(graphsByTable)
|
||||||
|
monkeypatch.setattr(migrationModule, "_connect", lambda: conn)
|
||||||
|
|
||||||
|
summary = migrationModule.migrate(dryRun=True)
|
||||||
|
|
||||||
|
assert conn.updates == []
|
||||||
|
assert conn.committed is False
|
||||||
|
assert conn.closed is True
|
||||||
|
assert summary['"Automation2Workflow"']["scanned"] == 4
|
||||||
|
assert summary['"Automation2Workflow"']["rowsChanged"] == 2
|
||||||
|
assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3
|
||||||
|
assert summary['"AutoVersion"']["rowsChanged"] == 1
|
||||||
|
assert summary['"AutoVersion"']["fieldsRewritten"] == 1
|
||||||
|
|
||||||
|
def test_liveRunWritesAndCommits(self, monkeypatch, graphsByTable):
|
||||||
|
conn = _FakeConn(graphsByTable)
|
||||||
|
monkeypatch.setattr(migrationModule, "_connect", lambda: conn)
|
||||||
|
|
||||||
|
summary = migrationModule.migrate(dryRun=False)
|
||||||
|
|
||||||
|
assert conn.committed is True
|
||||||
|
assert conn.closed is True
|
||||||
|
|
||||||
|
updatesByPk = {pk: graph for (_table, pk, graph) in conn.updates}
|
||||||
|
assert set(updatesByPk.keys()) == {"wf-legacy", "wf-graph-as-string", "ver-legacy"}
|
||||||
|
|
||||||
|
legacyGraph = updatesByPk["wf-legacy"]
|
||||||
|
n1Param = legacyGraph["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
n2Param = legacyGraph["nodes"][1]["parameters"]["featureInstanceId"]
|
||||||
|
assert n1Param["$type"] == "FeatureInstanceRef"
|
||||||
|
assert n1Param["featureCode"] == "trustee"
|
||||||
|
assert n1Param["id"] == "11111111-1111-1111-1111-111111111111"
|
||||||
|
assert n2Param["featureCode"] == "redmine"
|
||||||
|
|
||||||
|
verParam = updatesByPk["ver-legacy"]["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
assert verParam["featureCode"] == "ai"
|
||||||
|
|
||||||
|
stringSourcedGraph = updatesByPk["wf-graph-as-string"]
|
||||||
|
outlookParam = stringSourcedGraph["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
assert outlookParam["featureCode"] == "outlook"
|
||||||
|
|
||||||
|
assert summary['"Automation2Workflow"']["fieldsRewritten"] == 3
|
||||||
|
assert summary['"AutoVersion"']["fieldsRewritten"] == 1
|
||||||
|
|
||||||
|
def test_idempotency(self, monkeypatch, graphsByTable):
|
||||||
|
conn1 = _FakeConn(graphsByTable)
|
||||||
|
monkeypatch.setattr(migrationModule, "_connect", lambda: conn1)
|
||||||
|
migrationModule.migrate(dryRun=False)
|
||||||
|
|
||||||
|
firstUpdates = {pk: graph for (_t, pk, graph) in conn1.updates}
|
||||||
|
nextRows = {
|
||||||
|
'"Automation2Workflow"': [
|
||||||
|
{"pk": pk, "graph": graph}
|
||||||
|
for pk, graph in firstUpdates.items()
|
||||||
|
if pk.startswith("wf")
|
||||||
|
],
|
||||||
|
'"AutoVersion"': [
|
||||||
|
{"pk": pk, "graph": graph}
|
||||||
|
for pk, graph in firstUpdates.items()
|
||||||
|
if pk.startswith("ver")
|
||||||
|
],
|
||||||
|
}
|
||||||
|
conn2 = _FakeConn(nextRows)
|
||||||
|
monkeypatch.setattr(migrationModule, "_connect", lambda: conn2)
|
||||||
|
summary2 = migrationModule.migrate(dryRun=False)
|
||||||
|
|
||||||
|
assert conn2.updates == []
|
||||||
|
for table, counts in summary2.items():
|
||||||
|
assert counts["rowsChanged"] == 0, f"{table} not idempotent"
|
||||||
|
assert counts["fieldsRewritten"] == 0, f"{table} not idempotent"
|
||||||
127
tests/unit/serviceAgent/test_action_tool_adapter_typed.py
Normal file
127
tests/unit/serviceAgent/test_action_tool_adapter_typed.py
Normal file
|
|
@ -0,0 +1,127 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
Tests for the catalog-driven JSON-Schema generator in actionToolAdapter
|
||||||
|
(Phase 3 of the Typed Action Architecture).
|
||||||
|
|
||||||
|
Covers `_catalogTypeToJsonSchema` and `_convertParameterSchema` with:
|
||||||
|
- Primitives (str/int/bool/float/Any)
|
||||||
|
- Catalog object schemas (recursive expansion, required fields, enums)
|
||||||
|
- List[X] (array with typed items)
|
||||||
|
- Dict[K,V] (object with typed additionalProperties)
|
||||||
|
- Unknown type → safe fallback (string)
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from modules.serviceCenter.services.serviceAgent.actionToolAdapter import (
|
||||||
|
_catalogTypeToJsonSchema,
|
||||||
|
_convertParameterSchema,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestPrimitives:
|
||||||
|
def test_str(self):
|
||||||
|
assert _catalogTypeToJsonSchema("str") == {"type": "string"}
|
||||||
|
|
||||||
|
def test_int(self):
|
||||||
|
assert _catalogTypeToJsonSchema("int") == {"type": "integer"}
|
||||||
|
|
||||||
|
def test_bool(self):
|
||||||
|
assert _catalogTypeToJsonSchema("bool") == {"type": "boolean"}
|
||||||
|
|
||||||
|
def test_float(self):
|
||||||
|
assert _catalogTypeToJsonSchema("float") == {"type": "number"}
|
||||||
|
|
||||||
|
def test_anyHasNoTypeField(self):
|
||||||
|
# JSON Schema "any" is best expressed as an empty schema.
|
||||||
|
assert _catalogTypeToJsonSchema("Any") == {}
|
||||||
|
|
||||||
|
|
||||||
|
class TestContainers:
|
||||||
|
def test_listOfPrimitive(self):
|
||||||
|
assert _catalogTypeToJsonSchema("List[str]") == {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
}
|
||||||
|
|
||||||
|
def test_listOfCatalogSchema(self):
|
||||||
|
out = _catalogTypeToJsonSchema("List[Document]")
|
||||||
|
assert out["type"] == "array"
|
||||||
|
assert out["items"]["type"] == "object"
|
||||||
|
# Recursive expansion delivered Document fields:
|
||||||
|
propsName = out["items"]["properties"].get("name", {})
|
||||||
|
assert propsName.get("type") == "string"
|
||||||
|
|
||||||
|
def test_dictWithPrimitiveValue(self):
|
||||||
|
assert _catalogTypeToJsonSchema("Dict[str,Any]") == {
|
||||||
|
"type": "object",
|
||||||
|
"additionalProperties": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
def test_dictWithCatalogValue(self):
|
||||||
|
out = _catalogTypeToJsonSchema("Dict[str,Document]")
|
||||||
|
assert out["type"] == "object"
|
||||||
|
assert out["additionalProperties"]["type"] == "object"
|
||||||
|
assert "properties" in out["additionalProperties"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestCatalogObjects:
|
||||||
|
def test_connectionRefExpands(self):
|
||||||
|
out = _catalogTypeToJsonSchema("ConnectionRef")
|
||||||
|
assert out["type"] == "object"
|
||||||
|
assert "properties" in out
|
||||||
|
# ConnectionRef has 'id' (required) and 'authority' (required, discriminator)
|
||||||
|
assert "id" in out["properties"]
|
||||||
|
assert "authority" in out["properties"]
|
||||||
|
assert "id" in out["required"]
|
||||||
|
assert "authority" in out["required"]
|
||||||
|
|
||||||
|
def test_featureInstanceRefExpands(self):
|
||||||
|
out = _catalogTypeToJsonSchema("FeatureInstanceRef")
|
||||||
|
assert out["type"] == "object"
|
||||||
|
# mandateId is optional → must NOT be in required
|
||||||
|
assert "mandateId" not in out.get("required", [])
|
||||||
|
assert "id" in out["required"]
|
||||||
|
|
||||||
|
def test_descriptionPreserved(self):
|
||||||
|
out = _catalogTypeToJsonSchema("ConnectionRef")
|
||||||
|
assert "description" in out
|
||||||
|
assert "ConnectionRef" in out["description"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestUnknownFallback:
|
||||||
|
def test_unknownDefaultsToString(self):
|
||||||
|
out = _catalogTypeToJsonSchema("CompletelyUnknownType")
|
||||||
|
assert out["type"] == "string"
|
||||||
|
assert "unknown" in out.get("description", "").lower()
|
||||||
|
|
||||||
|
def test_emptyStringDefaultsToString(self):
|
||||||
|
assert _catalogTypeToJsonSchema("") == {"type": "string"}
|
||||||
|
|
||||||
|
|
||||||
|
class TestConvertParameterSchema:
|
||||||
|
def test_buildsObjectSchemaWithRequiredList(self):
|
||||||
|
actionParams = {
|
||||||
|
"documentList": {"type": "DocumentList", "required": True, "description": "Eingabe"},
|
||||||
|
"prompt": {"type": "str", "required": False, "description": "Prompt-Text"},
|
||||||
|
}
|
||||||
|
schema = _convertParameterSchema(actionParams)
|
||||||
|
assert schema["type"] == "object"
|
||||||
|
assert "documentList" in schema["properties"]
|
||||||
|
assert "prompt" in schema["properties"]
|
||||||
|
assert schema["required"] == ["documentList"]
|
||||||
|
assert schema["properties"]["documentList"]["description"] == "Eingabe"
|
||||||
|
# documentList expands to an object schema (DocumentList is a catalog object)
|
||||||
|
assert schema["properties"]["documentList"]["type"] == "object"
|
||||||
|
|
||||||
|
def test_handlesMalformedParamsGracefully(self):
|
||||||
|
actionParams = {"weird": "not-a-dict"}
|
||||||
|
schema = _convertParameterSchema(actionParams)
|
||||||
|
assert schema["properties"]["weird"]["type"] == "string"
|
||||||
|
|
||||||
|
def test_typedRefProducesObjectNotString(self):
|
||||||
|
"""Regression: pre-Phase-3 behaviour collapsed catalog refs to 'string'."""
|
||||||
|
actionParams = {"connection": {"type": "ConnectionRef", "required": True}}
|
||||||
|
schema = _convertParameterSchema(actionParams)
|
||||||
|
assert schema["properties"]["connection"]["type"] == "object"
|
||||||
|
assert "id" in schema["properties"]["connection"]["properties"]
|
||||||
0
tests/unit/teamsbot/__init__.py
Normal file
0
tests/unit/teamsbot/__init__.py
Normal file
604
tests/unit/teamsbot/test_directorPrompts.py
Normal file
604
tests/unit/teamsbot/test_directorPrompts.py
Normal file
|
|
@ -0,0 +1,604 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Unit tests for Teamsbot Director Prompts (Plan #5).
|
||||||
|
|
||||||
|
Covers:
|
||||||
|
* Datamodel limits, defaults, enum-validation
|
||||||
|
* SpeechTeamsResponse needsAgent / agentReason fields
|
||||||
|
* TeamsbotService._buildPersistentDirectorContext rendering
|
||||||
|
* TeamsbotService.submitDirectorPrompt: queues, emits SSE event, returns created
|
||||||
|
* TeamsbotService._processDirectorPrompt lifecycle:
|
||||||
|
queued -> running -> succeeded/consumed (one-shot vs persistent)
|
||||||
|
* TeamsbotService._processDirectorPrompt failure path drops persistent prompt
|
||||||
|
* TeamsbotService.removePersistentPrompt
|
||||||
|
* getActiveService / _activeServices registry
|
||||||
|
* TeamsbotObjects.getActivePersistentPrompts filtering
|
||||||
|
|
||||||
|
The TeamsbotService constructor instantiates BrowserBotConnector, which is
|
||||||
|
harmless (no network until joinMeeting). All DB / agent / SSE side-effects
|
||||||
|
are stubbed via monkeypatch.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from pydantic import ValidationError
|
||||||
|
|
||||||
|
from modules.features.teamsbot import service as serviceModule
|
||||||
|
from modules.features.teamsbot.datamodelTeamsbot import (
|
||||||
|
DIRECTOR_PROMPT_FILE_LIMIT,
|
||||||
|
DIRECTOR_PROMPT_TEXT_LIMIT,
|
||||||
|
SpeechTeamsResponse,
|
||||||
|
TeamsbotConfig,
|
||||||
|
TeamsbotDirectorPrompt,
|
||||||
|
TeamsbotDirectorPromptCreateRequest,
|
||||||
|
TeamsbotDirectorPromptMode,
|
||||||
|
TeamsbotDirectorPromptStatus,
|
||||||
|
)
|
||||||
|
from modules.features.teamsbot.service import (
|
||||||
|
TeamsbotService,
|
||||||
|
_activeServices,
|
||||||
|
_sessionEvents,
|
||||||
|
getActiveService,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Helpers
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class _FakeUser:
|
||||||
|
"""Minimal stand-in for modules.datamodels.datamodelUam.User used by the
|
||||||
|
service layer. TeamsbotService only needs ``id`` for logging / interface
|
||||||
|
keying."""
|
||||||
|
|
||||||
|
def __init__(self, userId: str = "user-op-1") -> None:
|
||||||
|
self.id = userId
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeInterface:
|
||||||
|
"""In-memory stand-in for TeamsbotObjects (only the director-prompt API).
|
||||||
|
|
||||||
|
Behaves like the real DB interface for the calls used by the service:
|
||||||
|
``createDirectorPrompt``, ``updateDirectorPrompt``, ``getDirectorPrompt``,
|
||||||
|
``getActivePersistentPrompts``, ``getActiveSystemBot``.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self) -> None:
|
||||||
|
self.prompts: Dict[str, Dict[str, Any]] = {}
|
||||||
|
self.created: List[Dict[str, Any]] = []
|
||||||
|
self.updates: List[Dict[str, Any]] = []
|
||||||
|
self.deleted: List[str] = []
|
||||||
|
|
||||||
|
def createDirectorPrompt(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
record = dict(data)
|
||||||
|
if "id" not in record:
|
||||||
|
record["id"] = f"prompt-{len(self.prompts)+1}"
|
||||||
|
self.prompts[record["id"]] = record
|
||||||
|
self.created.append(record)
|
||||||
|
return record
|
||||||
|
|
||||||
|
def updateDirectorPrompt(self, promptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||||
|
if promptId not in self.prompts:
|
||||||
|
return None
|
||||||
|
self.prompts[promptId].update(updates)
|
||||||
|
self.updates.append({"id": promptId, **updates})
|
||||||
|
return self.prompts[promptId]
|
||||||
|
|
||||||
|
def getDirectorPrompt(self, promptId: str) -> Optional[Dict[str, Any]]:
|
||||||
|
return self.prompts.get(promptId)
|
||||||
|
|
||||||
|
def getActivePersistentPrompts(self, sessionId: str) -> List[Dict[str, Any]]:
|
||||||
|
terminal = {
|
||||||
|
TeamsbotDirectorPromptStatus.CONSUMED.value,
|
||||||
|
TeamsbotDirectorPromptStatus.FAILED.value,
|
||||||
|
}
|
||||||
|
return [
|
||||||
|
p
|
||||||
|
for p in self.prompts.values()
|
||||||
|
if p.get("sessionId") == sessionId
|
||||||
|
and p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value
|
||||||
|
and p.get("status") not in terminal
|
||||||
|
]
|
||||||
|
|
||||||
|
def getActiveSystemBot(self, mandateId: str) -> Optional[Dict[str, Any]]:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class _CapturedEvents(list):
|
||||||
|
"""Helper to collect SSE events emitted by ``_emitSessionEvent``."""
|
||||||
|
|
||||||
|
async def append_event(self, sessionId: str, eventType: str, data: Any) -> None:
|
||||||
|
self.append({"sessionId": sessionId, "type": eventType, "data": data})
|
||||||
|
|
||||||
|
|
||||||
|
def _patchInterface(monkeypatch: pytest.MonkeyPatch, fakeInterface: _FakeInterface) -> None:
|
||||||
|
"""Replace ``getInterface`` in the teamsbot service module so the service
|
||||||
|
talks to our in-memory fake instead of PostgreSQL."""
|
||||||
|
from modules.features.teamsbot import interfaceFeatureTeamsbot as interfaceDb
|
||||||
|
|
||||||
|
monkeypatch.setattr(interfaceDb, "getInterface", lambda *args, **kwargs: fakeInterface)
|
||||||
|
|
||||||
|
|
||||||
|
def _patchEmit(monkeypatch: pytest.MonkeyPatch) -> _CapturedEvents:
|
||||||
|
captured = _CapturedEvents()
|
||||||
|
|
||||||
|
async def _stubEmit(sessionId: str, eventType: str, data: Any) -> None:
|
||||||
|
await captured.append_event(sessionId, eventType, data)
|
||||||
|
|
||||||
|
monkeypatch.setattr(serviceModule, "_emitSessionEvent", _stubEmit)
|
||||||
|
return captured
|
||||||
|
|
||||||
|
|
||||||
|
def _buildService() -> TeamsbotService:
|
||||||
|
"""Build a TeamsbotService with a minimal config. BrowserBotConnector is
|
||||||
|
instantiated but never reached in these tests."""
|
||||||
|
config = TeamsbotConfig(botName="UnitTest Bot")
|
||||||
|
svc = TeamsbotService(
|
||||||
|
currentUser=_FakeUser(),
|
||||||
|
mandateId="mandate-x",
|
||||||
|
instanceId="instance-y",
|
||||||
|
config=config,
|
||||||
|
)
|
||||||
|
svc._activeSessionId = "session-1"
|
||||||
|
return svc
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def _resetGlobals():
|
||||||
|
"""Avoid cross-test bleed in module-level globals."""
|
||||||
|
_activeServices.clear()
|
||||||
|
_sessionEvents.clear()
|
||||||
|
yield
|
||||||
|
_activeServices.clear()
|
||||||
|
_sessionEvents.clear()
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 1) Datamodel
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestDirectorPromptDatamodel:
|
||||||
|
def test_directorPromptDefaults(self):
|
||||||
|
prompt = TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s1",
|
||||||
|
instanceId="i1",
|
||||||
|
operatorUserId="u1",
|
||||||
|
text="Hello world",
|
||||||
|
)
|
||||||
|
assert prompt.mode == TeamsbotDirectorPromptMode.ONE_SHOT
|
||||||
|
assert prompt.status == TeamsbotDirectorPromptStatus.QUEUED
|
||||||
|
assert prompt.fileIds == []
|
||||||
|
assert prompt.consumedAt is None
|
||||||
|
assert prompt.responseText is None
|
||||||
|
assert prompt.id # uuid auto-filled
|
||||||
|
assert prompt.createdAt # iso timestamp auto-filled
|
||||||
|
|
||||||
|
def test_directorPromptTextLimitEnforced(self):
|
||||||
|
with pytest.raises(ValidationError):
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s1",
|
||||||
|
instanceId="i1",
|
||||||
|
operatorUserId="u1",
|
||||||
|
text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_directorPromptCreateRequestDefaults(self):
|
||||||
|
body = TeamsbotDirectorPromptCreateRequest(text="quick prompt")
|
||||||
|
assert body.mode == TeamsbotDirectorPromptMode.ONE_SHOT
|
||||||
|
assert body.fileIds == []
|
||||||
|
|
||||||
|
def test_directorPromptCreateRequestEmptyTextRejected(self):
|
||||||
|
with pytest.raises(ValidationError):
|
||||||
|
TeamsbotDirectorPromptCreateRequest(text="")
|
||||||
|
|
||||||
|
def test_directorPromptCreateRequestTooLongRejected(self):
|
||||||
|
with pytest.raises(ValidationError):
|
||||||
|
TeamsbotDirectorPromptCreateRequest(text="x" * (DIRECTOR_PROMPT_TEXT_LIMIT + 1))
|
||||||
|
|
||||||
|
def test_directorPromptStatusEnum(self):
|
||||||
|
assert TeamsbotDirectorPromptStatus.QUEUED.value == "queued"
|
||||||
|
assert TeamsbotDirectorPromptStatus.RUNNING.value == "running"
|
||||||
|
assert TeamsbotDirectorPromptStatus.SUCCEEDED.value == "succeeded"
|
||||||
|
assert TeamsbotDirectorPromptStatus.CONSUMED.value == "consumed"
|
||||||
|
assert TeamsbotDirectorPromptStatus.FAILED.value == "failed"
|
||||||
|
|
||||||
|
def test_directorPromptModeEnum(self):
|
||||||
|
assert TeamsbotDirectorPromptMode.ONE_SHOT.value == "oneShot"
|
||||||
|
assert TeamsbotDirectorPromptMode.PERSISTENT.value == "persistent"
|
||||||
|
|
||||||
|
def test_fileLimitConstantHasSaneValue(self):
|
||||||
|
assert DIRECTOR_PROMPT_FILE_LIMIT == 10
|
||||||
|
assert DIRECTOR_PROMPT_TEXT_LIMIT == 8000
|
||||||
|
|
||||||
|
|
||||||
|
class TestSpeechTeamsResponseHybrid:
|
||||||
|
def test_needsAgentDefaultFalse(self):
|
||||||
|
resp = SpeechTeamsResponse(shouldRespond=False)
|
||||||
|
assert resp.needsAgent is False
|
||||||
|
assert resp.agentReason is None
|
||||||
|
|
||||||
|
def test_needsAgentEscalation(self):
|
||||||
|
resp = SpeechTeamsResponse(
|
||||||
|
shouldRespond=True,
|
||||||
|
responseText="Moment, ich recherchiere.",
|
||||||
|
needsAgent=True,
|
||||||
|
agentReason="webSearch SBB Schweiz",
|
||||||
|
detectedIntent="addressed",
|
||||||
|
)
|
||||||
|
assert resp.needsAgent is True
|
||||||
|
assert resp.agentReason == "webSearch SBB Schweiz"
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 2) Persistent Director Context Renderer
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestBuildPersistentDirectorContext:
|
||||||
|
def test_emptyWhenNoPrompts(self):
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = []
|
||||||
|
assert svc._buildPersistentDirectorContext() == ""
|
||||||
|
|
||||||
|
def test_singlePrompt(self):
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = [
|
||||||
|
{"id": "p1", "text": "Antworte immer in Englisch."},
|
||||||
|
]
|
||||||
|
rendered = svc._buildPersistentDirectorContext()
|
||||||
|
assert "OPERATOR_DIRECTIVES" in rendered
|
||||||
|
assert "- Antworte immer in Englisch." in rendered
|
||||||
|
assert "private" in rendered
|
||||||
|
|
||||||
|
def test_skipsBlankText(self):
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = [
|
||||||
|
{"id": "p1", "text": " "},
|
||||||
|
{"id": "p2", "text": "Sei hoeflich."},
|
||||||
|
]
|
||||||
|
rendered = svc._buildPersistentDirectorContext()
|
||||||
|
assert "- Sei hoeflich." in rendered
|
||||||
|
assert "p1" not in rendered # the blank one is filtered out
|
||||||
|
|
||||||
|
def test_allBlankPromptsResultInEmpty(self):
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = [
|
||||||
|
{"id": "p1", "text": ""},
|
||||||
|
{"id": "p2", "text": " "},
|
||||||
|
]
|
||||||
|
assert svc._buildPersistentDirectorContext() == ""
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 3) submitDirectorPrompt
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestSubmitDirectorPrompt:
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_oneShotQueuesAndEmits(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
events = _patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
# Block the auto-process task from running, otherwise it would call
|
||||||
|
# the real agent service. We replace the coroutine factory.
|
||||||
|
async def _noProcess(prompt):
|
||||||
|
return None
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess)
|
||||||
|
|
||||||
|
created = await svc.submitDirectorPrompt(
|
||||||
|
sessionId="session-1",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Recherchier das im Internet.",
|
||||||
|
mode=TeamsbotDirectorPromptMode.ONE_SHOT,
|
||||||
|
fileIds=[],
|
||||||
|
)
|
||||||
|
|
||||||
|
assert created["status"] == TeamsbotDirectorPromptStatus.QUEUED.value
|
||||||
|
assert created["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value
|
||||||
|
assert created["text"] == "Recherchier das im Internet."
|
||||||
|
assert created["sessionId"] == "session-1"
|
||||||
|
assert created["instanceId"] == "instance-y"
|
||||||
|
assert created["operatorUserId"] == "user-op-1"
|
||||||
|
|
||||||
|
# SSE event with the queued lifecycle marker
|
||||||
|
assert any(
|
||||||
|
e["type"] == "directorPrompt"
|
||||||
|
and e["data"]["status"] == TeamsbotDirectorPromptStatus.QUEUED.value
|
||||||
|
and e["data"]["mode"] == TeamsbotDirectorPromptMode.ONE_SHOT.value
|
||||||
|
for e in events
|
||||||
|
)
|
||||||
|
|
||||||
|
# In-memory persistent registry remains empty for one-shot.
|
||||||
|
assert svc._activePersistentPrompts == []
|
||||||
|
|
||||||
|
# Allow the (no-op) background task to settle so the loop is clean.
|
||||||
|
await asyncio.sleep(0)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_persistentPromptAppendsToInMemoryRegistry(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
_patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
async def _noProcess(prompt):
|
||||||
|
return None
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
monkeypatch.setattr(svc, "_processDirectorPrompt", _noProcess)
|
||||||
|
|
||||||
|
created = await svc.submitDirectorPrompt(
|
||||||
|
sessionId="session-1",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Antworte immer in Englisch.",
|
||||||
|
mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
fileIds=["file-a", "file-b"],
|
||||||
|
)
|
||||||
|
|
||||||
|
assert created["mode"] == TeamsbotDirectorPromptMode.PERSISTENT.value
|
||||||
|
assert created["fileIds"] == ["file-a", "file-b"]
|
||||||
|
assert len(svc._activePersistentPrompts) == 1
|
||||||
|
assert svc._activePersistentPrompts[0]["id"] == created["id"]
|
||||||
|
|
||||||
|
await asyncio.sleep(0)
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 4) _processDirectorPrompt lifecycle
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestProcessDirectorPromptLifecycle:
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_oneShotSuccessTransitionsRunningThenConsumed(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
prompt = fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
id="prompt-success-1",
|
||||||
|
sessionId="session-1",
|
||||||
|
instanceId="instance-y",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Was ist die Hauptstadt von Frankreich?",
|
||||||
|
mode=TeamsbotDirectorPromptMode.ONE_SHOT,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
events = _patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
|
||||||
|
async def _stubAgent(**kwargs):
|
||||||
|
return "Paris."
|
||||||
|
|
||||||
|
monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent)
|
||||||
|
|
||||||
|
await svc._processDirectorPrompt(prompt)
|
||||||
|
|
||||||
|
statuses = [u.get("status") for u in fake.updates if u["id"] == prompt["id"]]
|
||||||
|
assert TeamsbotDirectorPromptStatus.RUNNING.value in statuses
|
||||||
|
assert TeamsbotDirectorPromptStatus.CONSUMED.value in statuses
|
||||||
|
|
||||||
|
final = fake.prompts[prompt["id"]]
|
||||||
|
assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value
|
||||||
|
assert final["responseText"] == "Paris."
|
||||||
|
assert final.get("consumedAt")
|
||||||
|
|
||||||
|
emittedStatuses = [
|
||||||
|
e["data"].get("status") for e in events if e["type"] == "directorPrompt"
|
||||||
|
]
|
||||||
|
assert TeamsbotDirectorPromptStatus.RUNNING.value in emittedStatuses
|
||||||
|
assert TeamsbotDirectorPromptStatus.CONSUMED.value in emittedStatuses
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_persistentSuccessStaysSucceededNotConsumed(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
prompt = fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
id="prompt-persist-1",
|
||||||
|
sessionId="session-1",
|
||||||
|
instanceId="instance-y",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Antworte immer in Englisch.",
|
||||||
|
mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
_patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
|
||||||
|
async def _stubAgent(**kwargs):
|
||||||
|
return "Acknowledged."
|
||||||
|
|
||||||
|
monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgent)
|
||||||
|
|
||||||
|
await svc._processDirectorPrompt(prompt)
|
||||||
|
|
||||||
|
final = fake.prompts[prompt["id"]]
|
||||||
|
assert final["status"] == TeamsbotDirectorPromptStatus.SUCCEEDED.value
|
||||||
|
assert final["responseText"] == "Acknowledged."
|
||||||
|
# Persistent prompts must stay alive beyond the run.
|
||||||
|
assert final.get("consumedAt") is None
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_failureMarksFailedAndDropsFromActivePersistent(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
prompt = fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
id="prompt-fail-1",
|
||||||
|
sessionId="session-1",
|
||||||
|
instanceId="instance-y",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Mach was Komplexes.",
|
||||||
|
mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
events = _patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = [prompt]
|
||||||
|
|
||||||
|
async def _stubAgentBoom(**kwargs):
|
||||||
|
raise RuntimeError("agent down")
|
||||||
|
|
||||||
|
monkeypatch.setattr(svc, "_runAgentForMeeting", _stubAgentBoom)
|
||||||
|
|
||||||
|
await svc._processDirectorPrompt(prompt)
|
||||||
|
|
||||||
|
final = fake.prompts[prompt["id"]]
|
||||||
|
assert final["status"] == TeamsbotDirectorPromptStatus.FAILED.value
|
||||||
|
assert "RuntimeError" in (final.get("statusMessage") or "")
|
||||||
|
|
||||||
|
# The failed persistent prompt is removed from the in-memory directives.
|
||||||
|
assert all(p["id"] != prompt["id"] for p in svc._activePersistentPrompts)
|
||||||
|
|
||||||
|
emittedStatuses = [
|
||||||
|
e["data"].get("status") for e in events if e["type"] == "directorPrompt"
|
||||||
|
]
|
||||||
|
assert TeamsbotDirectorPromptStatus.FAILED.value in emittedStatuses
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 5) removePersistentPrompt
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestRemovePersistentPrompt:
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_removePersistentPromptMarksConsumedAndDrops(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
prompt = fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
id="prompt-rm-1",
|
||||||
|
sessionId="session-1",
|
||||||
|
instanceId="instance-y",
|
||||||
|
operatorUserId="user-op-1",
|
||||||
|
text="Bleib hoeflich.",
|
||||||
|
mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
status=TeamsbotDirectorPromptStatus.SUCCEEDED,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
events = _patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
|
||||||
|
svc = _buildService()
|
||||||
|
svc._activePersistentPrompts = [prompt]
|
||||||
|
|
||||||
|
ok = await svc.removePersistentPrompt(prompt["id"])
|
||||||
|
assert ok is True
|
||||||
|
|
||||||
|
final = fake.prompts[prompt["id"]]
|
||||||
|
assert final["status"] == TeamsbotDirectorPromptStatus.CONSUMED.value
|
||||||
|
assert final.get("consumedAt")
|
||||||
|
assert final.get("statusMessage") == "Removed by operator"
|
||||||
|
assert svc._activePersistentPrompts == []
|
||||||
|
|
||||||
|
assert any(
|
||||||
|
e["type"] == "directorPrompt"
|
||||||
|
and e["data"].get("removed") is True
|
||||||
|
and e["data"].get("status") == TeamsbotDirectorPromptStatus.CONSUMED.value
|
||||||
|
for e in events
|
||||||
|
)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_removeUnknownPromptReturnsFalse(self, monkeypatch):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
_patchEmit(monkeypatch)
|
||||||
|
_patchInterface(monkeypatch, fake)
|
||||||
|
svc = _buildService()
|
||||||
|
ok = await svc.removePersistentPrompt("unknown-id")
|
||||||
|
assert ok is False
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 6) _activeServices Registry
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestActiveServicesRegistry:
|
||||||
|
def test_getActiveServiceReturnsNoneByDefault(self):
|
||||||
|
assert getActiveService("not-active") is None
|
||||||
|
|
||||||
|
def test_getActiveServiceReturnsRegistered(self):
|
||||||
|
svc = _buildService()
|
||||||
|
_activeServices["session-XYZ"] = svc
|
||||||
|
assert getActiveService("session-XYZ") is svc
|
||||||
|
|
||||||
|
def test_distinctSessionsMapToDistinctServices(self):
|
||||||
|
a = _buildService()
|
||||||
|
b = _buildService()
|
||||||
|
_activeServices["s1"] = a
|
||||||
|
_activeServices["s2"] = b
|
||||||
|
assert getActiveService("s1") is a
|
||||||
|
assert getActiveService("s2") is b
|
||||||
|
assert getActiveService("s1") is not getActiveService("s2")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# 7) Interface-level filtering for active persistent prompts
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
class TestGetActivePersistentPromptsFiltering:
|
||||||
|
"""The interface-level helper is the source of truth for what gets
|
||||||
|
re-loaded into _activePersistentPrompts on (re)connect."""
|
||||||
|
|
||||||
|
def test_onlyPersistentNonTerminal(self):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
# All four lifecycle states for the same session
|
||||||
|
for status in TeamsbotDirectorPromptStatus:
|
||||||
|
fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s1",
|
||||||
|
instanceId="i1",
|
||||||
|
operatorUserId="u1",
|
||||||
|
text=f"persist-{status.value}",
|
||||||
|
mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
status=status,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
# one-shot persistent-failure-irrelevant
|
||||||
|
fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s1",
|
||||||
|
instanceId="i1",
|
||||||
|
operatorUserId="u1",
|
||||||
|
text="oneShot-running",
|
||||||
|
mode=TeamsbotDirectorPromptMode.ONE_SHOT,
|
||||||
|
status=TeamsbotDirectorPromptStatus.RUNNING,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
|
||||||
|
active = fake.getActivePersistentPrompts("s1")
|
||||||
|
statuses = {p.get("status") for p in active}
|
||||||
|
|
||||||
|
# CONSUMED and FAILED are terminal; ONE_SHOT is not persistent.
|
||||||
|
assert TeamsbotDirectorPromptStatus.CONSUMED.value not in statuses
|
||||||
|
assert TeamsbotDirectorPromptStatus.FAILED.value not in statuses
|
||||||
|
# All returned prompts are persistent
|
||||||
|
assert all(
|
||||||
|
p.get("mode") == TeamsbotDirectorPromptMode.PERSISTENT.value for p in active
|
||||||
|
)
|
||||||
|
# Non-terminal persistent: QUEUED, RUNNING, SUCCEEDED -> 3 records
|
||||||
|
assert len(active) == 3
|
||||||
|
|
||||||
|
def test_filtersBySession(self):
|
||||||
|
fake = _FakeInterface()
|
||||||
|
fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s1", instanceId="i1", operatorUserId="u1",
|
||||||
|
text="A", mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
fake.createDirectorPrompt(
|
||||||
|
TeamsbotDirectorPrompt(
|
||||||
|
sessionId="s2", instanceId="i1", operatorUserId="u1",
|
||||||
|
text="B", mode=TeamsbotDirectorPromptMode.PERSISTENT,
|
||||||
|
).model_dump()
|
||||||
|
)
|
||||||
|
assert len(fake.getActivePersistentPrompts("s1")) == 1
|
||||||
|
assert len(fake.getActivePersistentPrompts("s2")) == 1
|
||||||
|
assert fake.getActivePersistentPrompts("ghost") == []
|
||||||
|
|
@ -2,12 +2,11 @@
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
|
||||||
from modules.features.graphicalEditor.portTypes import (
|
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
|
||||||
PORT_TYPE_CATALOG,
|
from modules.workflows.automation2.udmUpstreamShapes import (
|
||||||
INPUT_EXTRACTORS,
|
_coerceConsolidateResultInput,
|
||||||
_extractUdmDocument,
|
_coerceUdmDocumentInput,
|
||||||
_extractUdmNodeList,
|
_coerceUdmNodeListInput,
|
||||||
_extractConsolidateResult,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -32,34 +31,28 @@ def test_udm_port_types_registered():
|
||||||
assert "ConsolidateResult" in PORT_TYPE_CATALOG
|
assert "ConsolidateResult" in PORT_TYPE_CATALOG
|
||||||
|
|
||||||
|
|
||||||
def test_udm_extractors_registered():
|
def test_coerceUdmDocument_from_direct():
|
||||||
assert "UdmDocument" in INPUT_EXTRACTORS
|
|
||||||
assert "UdmNodeList" in INPUT_EXTRACTORS
|
|
||||||
assert "ConsolidateResult" in INPUT_EXTRACTORS
|
|
||||||
|
|
||||||
|
|
||||||
def test_extractUdmDocument_from_direct():
|
|
||||||
upstream = {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}
|
upstream = {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}
|
||||||
result = _extractUdmDocument(upstream)
|
result = _coerceUdmDocumentInput(upstream)
|
||||||
assert result["sourceType"] == "pdf"
|
assert result["sourceType"] == "pdf"
|
||||||
|
|
||||||
|
|
||||||
def test_extractUdmDocument_from_nested():
|
def test_coerceUdmDocument_from_nested():
|
||||||
upstream = {"udm": {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}, "other": 1}
|
upstream = {"udm": {"id": "d1", "sourceType": "pdf", "sourcePath": "/a.pdf", "children": []}, "other": 1}
|
||||||
result = _extractUdmDocument(upstream)
|
result = _coerceUdmDocumentInput(upstream)
|
||||||
assert result["sourceType"] == "pdf"
|
assert result["sourceType"] == "pdf"
|
||||||
|
|
||||||
|
|
||||||
def test_extractUdmNodeList():
|
def test_coerceUdmNodeList():
|
||||||
upstream = {"nodes": [{"id": "n1"}, {"id": "n2"}], "count": 2}
|
upstream = {"nodes": [{"id": "n1"}, {"id": "n2"}], "count": 2}
|
||||||
result = _extractUdmNodeList(upstream)
|
result = _coerceUdmNodeListInput(upstream)
|
||||||
assert result["count"] == 2
|
assert result["count"] == 2
|
||||||
assert len(result["nodes"]) == 2
|
assert len(result["nodes"]) == 2
|
||||||
|
|
||||||
|
|
||||||
def test_extractConsolidateResult():
|
def test_coerceConsolidateResult():
|
||||||
upstream = {"result": {"headers": [], "rows": []}, "mode": "table", "count": 3}
|
upstream = {"result": {"headers": [], "rows": []}, "mode": "table", "count": 3}
|
||||||
result = _extractConsolidateResult(upstream)
|
result = _coerceConsolidateResultInput(upstream)
|
||||||
assert result["mode"] == "table"
|
assert result["mode"] == "table"
|
||||||
assert result["count"] == 3
|
assert result["count"] == 3
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -65,3 +65,102 @@ class TestResolveParameterReferences:
|
||||||
node_outputs = {"n1": {"country": "CH"}}
|
node_outputs = {"n1": {"country": "CH"}}
|
||||||
value = "Land: {{n1.country}}"
|
value = "Land: {{n1.country}}"
|
||||||
assert resolveParameterReferences(value, node_outputs) == "Land: CH"
|
assert resolveParameterReferences(value, node_outputs) == "Land: CH"
|
||||||
|
|
||||||
|
|
||||||
|
class TestWildcardIteration:
|
||||||
|
"""Phase-4 typed Bindings-Resolver: ``*`` segment iterates over a list.
|
||||||
|
|
||||||
|
Path semantics:
|
||||||
|
["docs", "*", "name"] ⇒ map "name" over each item in docs
|
||||||
|
["docs", "*"] ⇒ the docs list itself (after passing through *)
|
||||||
|
Drops items whose remainder resolves to ``None`` (missing field).
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_wildcard_maps_over_list_to_field(self):
|
||||||
|
node_outputs = {
|
||||||
|
"src": {
|
||||||
|
"documents": [
|
||||||
|
{"name": "a.pdf", "size": 10},
|
||||||
|
{"name": "b.pdf", "size": 20},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value = {
|
||||||
|
"type": "ref",
|
||||||
|
"nodeId": "src",
|
||||||
|
"path": ["documents", "*", "name"],
|
||||||
|
}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) == ["a.pdf", "b.pdf"]
|
||||||
|
|
||||||
|
def test_wildcard_terminal_returns_list_copy(self):
|
||||||
|
node_outputs = {"src": {"items": ["x", "y", "z"]}}
|
||||||
|
value = {"type": "ref", "nodeId": "src", "path": ["items", "*"]}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) == ["x", "y", "z"]
|
||||||
|
|
||||||
|
def test_wildcard_drops_missing_fields(self):
|
||||||
|
node_outputs = {
|
||||||
|
"src": {
|
||||||
|
"rows": [
|
||||||
|
{"name": "a"},
|
||||||
|
{"otherField": 1},
|
||||||
|
{"name": "c"},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value = {"type": "ref", "nodeId": "src", "path": ["rows", "*", "name"]}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) == ["a", "c"]
|
||||||
|
|
||||||
|
def test_wildcard_on_non_list_returns_none(self):
|
||||||
|
node_outputs = {"src": {"docs": {"not": "a list"}}}
|
||||||
|
value = {"type": "ref", "nodeId": "src", "path": ["docs", "*", "name"]}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) is None
|
||||||
|
|
||||||
|
def test_wildcard_nested(self):
|
||||||
|
node_outputs = {
|
||||||
|
"src": {
|
||||||
|
"groups": [
|
||||||
|
{"items": [{"v": 1}, {"v": 2}]},
|
||||||
|
{"items": [{"v": 3}]},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value = {
|
||||||
|
"type": "ref",
|
||||||
|
"nodeId": "src",
|
||||||
|
"path": ["groups", "*", "items", "*", "v"],
|
||||||
|
}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) == [[1, 2], [3]]
|
||||||
|
|
||||||
|
def test_wildcard_inside_transit_envelope(self):
|
||||||
|
node_outputs = {
|
||||||
|
"src": {
|
||||||
|
"_transit": True,
|
||||||
|
"data": {"documents": [{"name": "p.pdf"}, {"name": "q.pdf"}]},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value = {
|
||||||
|
"type": "ref",
|
||||||
|
"nodeId": "src",
|
||||||
|
"path": ["documents", "*", "name"],
|
||||||
|
}
|
||||||
|
assert resolveParameterReferences(value, node_outputs) == ["p.pdf", "q.pdf"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestPathContainsWildcard:
|
||||||
|
"""``_pathContainsWildcard`` lets the engine decide between a scalar bind
|
||||||
|
and an iteration target (e.g. wrap a Loop container around the consumer).
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_detects_wildcard(self):
|
||||||
|
from modules.workflows.automation2.graphUtils import _pathContainsWildcard
|
||||||
|
assert _pathContainsWildcard(["docs", "*", "name"]) is True
|
||||||
|
assert _pathContainsWildcard(["*"]) is True
|
||||||
|
|
||||||
|
def test_no_wildcard(self):
|
||||||
|
from modules.workflows.automation2.graphUtils import _pathContainsWildcard
|
||||||
|
assert _pathContainsWildcard(["docs", 0, "name"]) is False
|
||||||
|
assert _pathContainsWildcard([]) is False
|
||||||
|
|
||||||
|
def test_literal_star_in_int_segment_does_not_match(self):
|
||||||
|
from modules.workflows.automation2.graphUtils import _pathContainsWildcard
|
||||||
|
assert _pathContainsWildcard([1, 2, 3]) is False
|
||||||
|
|
|
||||||
310
tests/unit/workflows/test_featureInstanceRefMigration.py
Normal file
310
tests/unit/workflows/test_featureInstanceRefMigration.py
Normal file
|
|
@ -0,0 +1,310 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
"""
|
||||||
|
Phase-5 Schicht-4 — unit tests for ``materializeFeatureInstanceRefs`` and the
|
||||||
|
runtime envelope unwrap in ``graphUtils.resolveParameterReferences``.
|
||||||
|
|
||||||
|
Plan: ``wiki/c-work/1-plan/2026-04-typed-action-architecture.md`` (T11).
|
||||||
|
"""
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from modules.workflows.automation2.featureInstanceRefMigration import (
|
||||||
|
materializeFeatureInstanceRefs,
|
||||||
|
)
|
||||||
|
from modules.workflows.automation2.graphUtils import (
|
||||||
|
_isTypedRefEnvelope,
|
||||||
|
_unwrapTypedRef,
|
||||||
|
resolveParameterReferences,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Migration: raw UUID -> typed envelope
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestMaterializeFeatureInstanceRefs:
|
||||||
|
def test_emptyGraphIsReturnedAsIs(self):
|
||||||
|
out = materializeFeatureInstanceRefs({})
|
||||||
|
assert out == {}
|
||||||
|
|
||||||
|
def test_nonDictInputIsPassthrough(self):
|
||||||
|
# Defensive: callers may pass a None / list by accident.
|
||||||
|
assert materializeFeatureInstanceRefs(None) is None
|
||||||
|
assert materializeFeatureInstanceRefs([]) == []
|
||||||
|
|
||||||
|
def test_graphWithoutFeatureInstanceIdIsUnchanged(self):
|
||||||
|
graph = {"nodes": [{"id": "n1", "type": "trigger.manual", "parameters": {}}]}
|
||||||
|
original = copy.deepcopy(graph)
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
assert out == original
|
||||||
|
|
||||||
|
def test_inputIsNotMutated(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": "abc-123"},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
snapshot = copy.deepcopy(graph)
|
||||||
|
materializeFeatureInstanceRefs(graph)
|
||||||
|
assert graph == snapshot
|
||||||
|
|
||||||
|
def test_rawUuidIsConvertedToEnvelope(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": "abc-123"},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
param = out["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
assert param == {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": "abc-123",
|
||||||
|
"featureCode": "trustee",
|
||||||
|
}
|
||||||
|
|
||||||
|
def test_rawUuidPreservedWhitespaceIsTrimmed(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": " abc-123 "},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
assert out["nodes"][0]["parameters"]["featureInstanceId"]["id"] == "abc-123"
|
||||||
|
|
||||||
|
def test_emptyStringIsLeftUntouched(self):
|
||||||
|
# Empty featureInstanceId is the editor placeholder for "not yet bound";
|
||||||
|
# the migration must NOT pretend an empty value is a real UUID.
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": ""},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
assert out["nodes"][0]["parameters"]["featureInstanceId"] == ""
|
||||||
|
|
||||||
|
def test_alreadyTypedEnvelopeIsIdempotent(self):
|
||||||
|
envelope = {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": "abc-123",
|
||||||
|
"featureCode": "trustee",
|
||||||
|
}
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": envelope},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
assert out["nodes"][0]["parameters"]["featureInstanceId"] == envelope
|
||||||
|
|
||||||
|
def test_runMigrationTwiceProducesSameResult(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": "abc-123"},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
once = materializeFeatureInstanceRefs(graph)
|
||||||
|
twice = materializeFeatureInstanceRefs(once)
|
||||||
|
assert once == twice
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"nodeType,expectedFeatureCode",
|
||||||
|
[
|
||||||
|
("trustee.extractFromFiles", "trustee"),
|
||||||
|
("trustee.processDocuments", "trustee"),
|
||||||
|
("redmine.createIssue", "redmine"),
|
||||||
|
("clickup.createTask", "clickup"),
|
||||||
|
("sharepoint.listFiles", "sharepoint"),
|
||||||
|
("outlook.readEmails", "outlook"),
|
||||||
|
("email.searchEmail", "outlook"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_featureCodeIsDerivedFromNodeTypePrefix(
|
||||||
|
self, nodeType, expectedFeatureCode
|
||||||
|
):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n",
|
||||||
|
"type": nodeType,
|
||||||
|
"parameters": {"featureInstanceId": "uuid-x"},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
env = out["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
assert env["featureCode"] == expectedFeatureCode
|
||||||
|
|
||||||
|
def test_unknownNodeTypePrefixOmitsFeatureCode(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n",
|
||||||
|
"type": "weird.unknown.action",
|
||||||
|
"parameters": {"featureInstanceId": "uuid-x"},
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
env = out["nodes"][0]["parameters"]["featureInstanceId"]
|
||||||
|
assert env == {"$type": "FeatureInstanceRef", "id": "uuid-x"}
|
||||||
|
|
||||||
|
def test_multipleNodesAreAllMigrated(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": "n5",
|
||||||
|
"type": "trustee.extractFromFiles",
|
||||||
|
"parameters": {"featureInstanceId": "uuid-1"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "n6",
|
||||||
|
"type": "trustee.queryData",
|
||||||
|
"parameters": {"featureInstanceId": "uuid-2"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "n9",
|
||||||
|
"type": "trustee.processDocuments",
|
||||||
|
"parameters": {"featureInstanceId": "uuid-3"},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
ids = [n["parameters"]["featureInstanceId"]["id"] for n in out["nodes"]]
|
||||||
|
assert ids == ["uuid-1", "uuid-2", "uuid-3"]
|
||||||
|
|
||||||
|
def test_nodesWithoutParametersAreSkipped(self):
|
||||||
|
graph = {
|
||||||
|
"nodes": [
|
||||||
|
{"id": "n1", "type": "trigger.manual"},
|
||||||
|
{"id": "n2", "type": "trustee.queryData"}, # no parameters key
|
||||||
|
{
|
||||||
|
"id": "n3",
|
||||||
|
"type": "trustee.processDocuments",
|
||||||
|
"parameters": None,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
out = materializeFeatureInstanceRefs(graph)
|
||||||
|
assert out == graph
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Runtime envelope unwrap (graphUtils._unwrapTypedRef + resolveParameterReferences)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestIsTypedRefEnvelope:
|
||||||
|
def test_recognisesFeatureInstanceRef(self):
|
||||||
|
env = {"$type": "FeatureInstanceRef", "id": "abc"}
|
||||||
|
assert _isTypedRefEnvelope(env) is True
|
||||||
|
|
||||||
|
def test_recognisesConnectionRef(self):
|
||||||
|
env = {"$type": "ConnectionRef", "id": "abc"}
|
||||||
|
assert _isTypedRefEnvelope(env) is True
|
||||||
|
|
||||||
|
def test_rejectsRawDict(self):
|
||||||
|
assert _isTypedRefEnvelope({"id": "abc"}) is False
|
||||||
|
|
||||||
|
def test_rejectsUnknownType(self):
|
||||||
|
assert _isTypedRefEnvelope({"$type": "Foobar", "id": "abc"}) is False
|
||||||
|
|
||||||
|
def test_rejectsNonDict(self):
|
||||||
|
assert _isTypedRefEnvelope("abc") is False
|
||||||
|
assert _isTypedRefEnvelope(None) is False
|
||||||
|
assert _isTypedRefEnvelope(["abc"]) is False
|
||||||
|
|
||||||
|
|
||||||
|
class TestUnwrapTypedRef:
|
||||||
|
def test_unwrapsFeatureInstanceRefToId(self):
|
||||||
|
env = {"$type": "FeatureInstanceRef", "id": "uuid-x", "featureCode": "trustee"}
|
||||||
|
assert _unwrapTypedRef(env) == "uuid-x"
|
||||||
|
|
||||||
|
def test_unwrapsConnectionRefToId(self):
|
||||||
|
env = {"$type": "ConnectionRef", "id": "conn-y", "authority": "msft"}
|
||||||
|
assert _unwrapTypedRef(env) == "conn-y"
|
||||||
|
|
||||||
|
def test_unwrapsSharePointFileRefToFilePath(self):
|
||||||
|
env = {"$type": "SharePointFileRef", "filePath": "/Sites/X/file.pdf"}
|
||||||
|
assert _unwrapTypedRef(env) == "/Sites/X/file.pdf"
|
||||||
|
|
||||||
|
def test_passthroughForNonEnvelope(self):
|
||||||
|
assert _unwrapTypedRef("plain-string") == "plain-string"
|
||||||
|
assert _unwrapTypedRef({"id": "abc"}) == {"id": "abc"}
|
||||||
|
assert _unwrapTypedRef(None) is None
|
||||||
|
|
||||||
|
def test_returnsEnvelopeIfPrimaryFieldMissing(self):
|
||||||
|
# Defensive: malformed envelope without ``id`` falls back to itself
|
||||||
|
# rather than silently dropping data.
|
||||||
|
env = {"$type": "FeatureInstanceRef", "featureCode": "trustee"}
|
||||||
|
assert _unwrapTypedRef(env) == env
|
||||||
|
|
||||||
|
|
||||||
|
class TestResolveParameterReferencesUnwrap:
|
||||||
|
def test_typedEnvelopeAtTopLevelIsUnwrapped(self):
|
||||||
|
env = {"$type": "FeatureInstanceRef", "id": "uuid-z", "featureCode": "trustee"}
|
||||||
|
out = resolveParameterReferences(env, nodeOutputs={})
|
||||||
|
assert out == "uuid-z"
|
||||||
|
|
||||||
|
def test_typedEnvelopeNestedInDictIsUnwrapped(self):
|
||||||
|
params = {
|
||||||
|
"featureInstanceId": {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": "uuid-z",
|
||||||
|
"featureCode": "trustee",
|
||||||
|
},
|
||||||
|
"mode": "lookup",
|
||||||
|
}
|
||||||
|
out = resolveParameterReferences(params, nodeOutputs={})
|
||||||
|
assert out == {"featureInstanceId": "uuid-z", "mode": "lookup"}
|
||||||
|
|
||||||
|
def test_typedEnvelopesInListAreUnwrappedElementwise(self):
|
||||||
|
params = [
|
||||||
|
{"$type": "FeatureInstanceRef", "id": "u1"},
|
||||||
|
{"$type": "FeatureInstanceRef", "id": "u2"},
|
||||||
|
"static",
|
||||||
|
]
|
||||||
|
out = resolveParameterReferences(params, nodeOutputs={})
|
||||||
|
assert out == ["u1", "u2", "static"]
|
||||||
|
|
||||||
|
def test_typedEnvelopeIsResolvedBeforeRefLookup(self):
|
||||||
|
# If a workflow somehow contains both shapes, the typed envelope wins;
|
||||||
|
# ref-resolution is for upstream-bound DataRefs which never carry
|
||||||
|
# ``$type`` at the top level.
|
||||||
|
env = {
|
||||||
|
"$type": "FeatureInstanceRef",
|
||||||
|
"id": "uuid-z",
|
||||||
|
# nonsensical ``type: ref`` shadow — must be ignored.
|
||||||
|
"type": "ref",
|
||||||
|
"nodeId": "nope",
|
||||||
|
"path": ["whatever"],
|
||||||
|
}
|
||||||
|
out = resolveParameterReferences(env, nodeOutputs={"nope": {"whatever": "x"}})
|
||||||
|
assert out == "uuid-z"
|
||||||
Loading…
Reference in a new issue