# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow Toolbox - AI-assisted graph manipulation tools for the GraphicalEditor.

Tools: readWorkflowGraph, addNode, removeNode, connectNodes, setNodeParameter,
       listAvailableNodeTypes, describeNodeType, autoLayoutWorkflow,
       validateGraph, listWorkflowHistory, readWorkflowMessages.

Conventions enforced here (matches coreTools / actionToolAdapter):
- Every ``ToolResult(...)`` provides ``toolCallId`` and ``toolName`` (pydantic
  requires both); ``ToolRegistry.dispatch`` overwrites ``toolCallId`` later
  but the model still validates at construction.
- ``ToolResult.data`` is a ``str``; structured payloads are JSON-encoded.
- ``workflowId`` and ``instanceId`` are auto-injected from the agent
  ``context`` dict (``workflowId``, ``featureInstanceId``) when the model
  omits them — the editor agent always runs in exactly one workflow.
"""
|
|
|
|
import json
|
|
import logging
|
|
import uuid
|
|
from typing import Dict, Any, List, Tuple
|
|
|
|
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
|
|
|
|
# Module-level logger; handlers below use lazy %-style arguments.
logger = logging.getLogger(__name__)

# Key under which this toolbox is registered with the tool registry.
TOOLBOX_ID = "workflow"
|
|
|
|
|
|
def _toData(payload: Any) -> str:
|
|
"""Encode a structured payload into ToolResult.data (which is a string)."""
|
|
if isinstance(payload, str):
|
|
return payload
|
|
try:
|
|
return json.dumps(payload, default=str, ensure_ascii=False)
|
|
except Exception:
|
|
return str(payload)
|
|
|
|
|
|
def _err(toolName: str, message: str) -> ToolResult:
    """Build a failed ``ToolResult``; dispatch overwrites ``toolCallId`` later."""
    return ToolResult(
        toolCallId="",
        toolName=toolName,
        success=False,
        error=message,
    )
|
|
|
|
|
|
def _ok(toolName: str, payload: Any) -> ToolResult:
    """Build a successful ``ToolResult`` with *payload* encoded into ``data``."""
    return ToolResult(
        toolCallId="",
        toolName=toolName,
        success=True,
        data=_toData(payload),
    )
|
|
|
|
|
|
def _resolveIds(params: Dict[str, Any], context: Any) -> Tuple[str, str]:
|
|
"""Return (workflowId, instanceId), auto-injecting from context when missing.
|
|
|
|
The editor agent context (``agentLoop._executeToolCalls``) is a dict with
|
|
``workflowId`` and ``featureInstanceId`` — use them as defaults so the
|
|
model doesn't have to re-state the ids on every tool call.
|
|
"""
|
|
ctx: Dict[str, Any] = context if isinstance(context, dict) else {}
|
|
workflowId = params.get("workflowId") or ctx.get("workflowId") or ""
|
|
instanceId = (
|
|
params.get("instanceId")
|
|
or ctx.get("featureInstanceId")
|
|
or ctx.get("instanceId")
|
|
or ""
|
|
)
|
|
return workflowId, instanceId
|
|
|
|
|
|
def _resolveUser(context: Any):
|
|
"""Return the User object for the current agent context (lazy DB fetch)."""
|
|
if not isinstance(context, dict):
|
|
return getattr(context, "user", None)
|
|
user = context.get("user")
|
|
if user is not None:
|
|
return user
|
|
userId = context.get("userId")
|
|
if not userId:
|
|
return None
|
|
try:
|
|
from modules.interfaces.interfaceDbApp import getRootInterface
|
|
return getRootInterface().getUser(str(userId))
|
|
except Exception as e:
|
|
logger.warning("workflowTools: could not resolve user %s: %s", userId, e)
|
|
return None
|
|
|
|
|
|
def _resolveMandateId(context: Any) -> str:
|
|
if not isinstance(context, dict):
|
|
return getattr(context, "mandateId", "") or ""
|
|
return context.get("mandateId") or ""
|
|
|
|
|
|
def _getInterface(context: Any, instanceId: str):
    """Construct the GraphicalEditor interface bound to the context's user/mandate."""
    from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
    user = _resolveUser(context)
    mandateId = _resolveMandateId(context)
    return getGraphicalEditorInterface(user, mandateId, instanceId)
|
|
|
|
|
|
async def _readWorkflowGraph(params: Dict[str, Any], context: Any) -> ToolResult:
    """Read the current workflow graph (nodes and connections).

    Returns a compact summary: workflow label, node/connection counts, a
    per-node ``{id, type, title}`` list, and the raw connection list.
    """
    name = "readWorkflowGraph"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required (and not present in agent context)")

        workflow = _getInterface(context, instanceId).getWorkflow(workflowId)
        if not workflow:
            return _err(name, f"Workflow {workflowId} not found")

        graph = workflow.get("graph", {}) or {}
        nodes = graph.get("nodes", []) or []
        connections = graph.get("connections", []) or []
        summary = {
            "workflowId": workflowId,
            "label": workflow.get("label", ""),
            "nodeCount": len(nodes),
            "connectionCount": len(connections),
            "nodes": [
                {"id": node.get("id"), "type": node.get("type"), "title": node.get("title", "")}
                for node in nodes
            ],
            "connections": connections,
        }
        return _ok(name, summary)
    except Exception as e:
        logger.exception("readWorkflowGraph failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _addNode(params: Dict[str, Any], context: Any) -> ToolResult:
    """Add a node to the workflow graph.

    Requires ``nodeType``; ``nodeId`` defaults to a short random id, and
    ``parameters`` default to an empty dict. Persists the updated graph.
    """
    name = "addNode"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        nodeType = params.get("nodeType")
        if not (workflowId and instanceId and nodeType):
            return _err(name, "workflowId, instanceId, and nodeType required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = dict(wf.get("graph", {}) or {})
        nodes = list(graph.get("nodes", []) or [])

        nodeId = params.get("nodeId") or str(uuid.uuid4())[:8]
        title = params.get("title", "")
        nodeParams = params.get("parameters", {}) or {}

        # Frontend stores positions as TOP-LEVEL ``x`` / ``y`` on the node
        # (see ``fromApiGraph`` / ``toApiGraph``). Accept either explicit
        # ``x`` / ``y`` or a ``position={x,y}`` shape from the model and
        # always persist as top-level ``x`` / ``y``. Fallback puts new
        # nodes in a horizontal stripe so the user sees them even before
        # ``autoLayoutWorkflow`` runs.
        position = params.get("position") or {}

        def _coord(axis: str, fallback: Any) -> Any:
            value = params.get(axis)
            if value is None and isinstance(position, dict):
                value = position.get(axis)
            return fallback if value is None else value

        nodes.append({
            "id": nodeId,
            "type": nodeType,
            "title": title,
            "parameters": nodeParams,
            "x": _coord("x", 40 + len(nodes) * 260),
            "y": _coord("y", 40),
        })
        graph["nodes"] = nodes

        iface.updateWorkflow(workflowId, {"graph": graph})
        return _ok(name, {
            "nodeId": nodeId,
            "nodeType": nodeType,
            "message": f"Node '{title or nodeType}' added",
        })
    except Exception as e:
        logger.exception("addNode failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _removeNode(params: Dict[str, Any], context: Any) -> ToolResult:
    """Remove a node and its connections from the workflow graph.

    Any connection whose source OR target is the removed node is dropped
    along with it, then the trimmed graph is persisted.
    """
    name = "removeNode"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        nodeId = params.get("nodeId")
        if not (workflowId and instanceId and nodeId):
            return _err(name, "workflowId, instanceId, and nodeId required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = dict(wf.get("graph", {}) or {})
        remainingNodes = []
        for node in graph.get("nodes", []) or []:
            if node.get("id") != nodeId:
                remainingNodes.append(node)
        remainingConnections = []
        for conn in graph.get("connections", []) or []:
            if nodeId not in (conn.get("source"), conn.get("target")):
                remainingConnections.append(conn)
        graph["nodes"] = remainingNodes
        graph["connections"] = remainingConnections

        iface.updateWorkflow(workflowId, {"graph": graph})
        return _ok(name, {"nodeId": nodeId, "message": f"Node {nodeId} removed"})
    except Exception as e:
        logger.exception("removeNode failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _connectNodes(params: Dict[str, Any], context: Any) -> ToolResult:
    """Connect two nodes in the workflow graph.

    Appends a ``{source, target, sourceOutput, targetInput}`` edge (ports
    default to 0) and persists the graph. No duplicate-edge check is done.
    """
    name = "connectNodes"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        sourceId = params.get("sourceId")
        targetId = params.get("targetId")
        if not (workflowId and instanceId and sourceId and targetId):
            return _err(name, "workflowId, instanceId, sourceId, and targetId required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        connection = {
            "source": sourceId,
            "target": targetId,
            "sourceOutput": params.get("sourceOutput", 0),
            "targetInput": params.get("targetInput", 0),
        }
        graph = dict(wf.get("graph", {}) or {})
        graph["connections"] = list(graph.get("connections", []) or []) + [connection]

        iface.updateWorkflow(workflowId, {"graph": graph})
        return _ok(name, {"connection": connection, "message": f"Connected {sourceId} -> {targetId}"})
    except Exception as e:
        logger.exception("connectNodes failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult:
    """Set a parameter on a node.

    Locates the node by id, copies its parameter dict (avoids aliasing the
    stored dict), writes the new value, and persists the graph.
    """
    name = "setNodeParameter"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        nodeId = params.get("nodeId")
        paramName = params.get("parameterName")
        paramValue = params.get("parameterValue")
        if not (workflowId and instanceId and nodeId and paramName):
            return _err(name, "workflowId, instanceId, nodeId, and parameterName required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = dict(wf.get("graph", {}) or {})
        nodes = list(graph.get("nodes", []) or [])
        target = next((node for node in nodes if node.get("id") == nodeId), None)
        if target is None:
            return _err(name, f"Node {nodeId} not found in graph")

        updatedParams = dict(target.get("parameters", {}) or {})
        updatedParams[paramName] = paramValue
        target["parameters"] = updatedParams

        graph["nodes"] = nodes
        iface.updateWorkflow(workflowId, {"graph": graph})
        return _ok(name, {
            "nodeId": nodeId,
            "parameter": paramName,
            "message": f"Parameter '{paramName}' set",
        })
    except Exception as e:
        logger.exception("setNodeParameter failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _list_upstream_paths(params: Dict[str, Any], context: Any) -> ToolResult:
    """List pickable upstream DataRef paths for a node (saved workflow graph).

    Delegates the graph traversal to ``compute_upstream_paths`` and returns
    its result under ``paths``.

    Consistency fix: local names now use camelCase like the rest of this
    module (the function's own name is kept — external callers reference it).
    """
    name = "listUpstreamPaths"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        nodeId = params.get("nodeId")
        if not workflowId or not instanceId or not nodeId:
            return _err(name, "workflowId, instanceId, and nodeId required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = wf.get("graph", {}) or {}
        from modules.features.graphicalEditor.upstreamPathsService import compute_upstream_paths

        paths = compute_upstream_paths(graph if isinstance(graph, dict) else {}, str(nodeId))
        return _ok(name, {"paths": paths})
    except Exception as e:
        logger.exception("listUpstreamPaths failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _bind_node_parameter(params: Dict[str, Any], context: Any) -> ToolResult:
    """Bind a node parameter to an upstream field via an explicit DataRef.

    Writes a ``{"type": "ref", "nodeId": ..., "path": [...]}`` marker into
    the node's parameter dict; ``expectedType`` is attached when provided.
    A non-list ``path`` degrades to ``[]`` (root of the producer output).

    Consistency fix: local names now use camelCase like the rest of this
    module (the function's own name is kept — external callers reference it).
    """
    name = "bindNodeParameter"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        nodeId = params.get("nodeId")
        paramName = params.get("parameterName")
        producerNodeId = params.get("producerNodeId")
        path = params.get("path")
        if not workflowId or not instanceId or not nodeId or not paramName:
            return _err(name, "workflowId, instanceId, nodeId, and parameterName required")
        if not producerNodeId:
            return _err(name, "producerNodeId required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = dict(wf.get("graph", {}) or {})
        nodes = list(graph.get("nodes", []) or [])
        found = False
        ref: Dict[str, Any] = {
            "type": "ref",
            "nodeId": str(producerNodeId),
            "path": list(path) if isinstance(path, (list, tuple)) else [],
        }
        expectedType = params.get("expectedType")
        if expectedType:
            ref["expectedType"] = str(expectedType)

        for n in nodes:
            if n.get("id") == nodeId:
                nodeParams = dict(n.get("parameters", {}) or {})
                nodeParams[paramName] = ref
                n["parameters"] = nodeParams
                found = True
                break

        if not found:
            return _err(name, f"Node {nodeId} not found in graph")

        graph["nodes"] = nodes
        iface.updateWorkflow(workflowId, {"graph": graph})
        return _ok(name, {
            "nodeId": nodeId,
            "parameter": paramName,
            "dataRef": ref,
            "message": f"Parameter '{paramName}' bound to upstream {producerNodeId}",
        })
    except Exception as e:
        logger.exception("bindNodeParameter failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
def _coerceLabel(rawLabel: Any, fallback: str) -> str:
|
|
"""Normalize a node label which may be a string, dict {locale: str}, or other."""
|
|
if isinstance(rawLabel, str):
|
|
return rawLabel
|
|
if isinstance(rawLabel, dict):
|
|
for key in ("en", "de", "fr"):
|
|
value = rawLabel.get(key)
|
|
if isinstance(value, str) and value:
|
|
return value
|
|
for value in rawLabel.values():
|
|
if isinstance(value, str) and value:
|
|
return value
|
|
return fallback
|
|
|
|
|
|
def _summarizeNodeForCatalog(n: Dict[str, Any]) -> Dict[str, Any]:
    """Compact summary used in ``listAvailableNodeTypes`` — small but
    informative enough that the model can pick the right type and knows
    whether ``describeNodeType`` is worth a follow-up call."""
    nodeId = n.get("id") or ""
    paramsList = n.get("parameters") or []
    requiredCount = 0
    for spec in paramsList:
        if isinstance(spec, dict) and spec.get("required"):
            requiredCount += 1
    meta = n.get("meta") or {}
    return {
        "id": nodeId,
        "category": n.get("category"),
        "label": _coerceLabel(n.get("label"), nodeId),
        "description": _coerceLabel(n.get("description"), ""),
        "paramCount": len(paramsList),
        "requiredParamCount": requiredCount,
        "usesAi": bool(meta.get("usesAi")),
    }
|
|
|
|
|
|
def _summarizeParameter(p: Dict[str, Any]) -> Dict[str, Any]:
    """Reduce a node parameter spec to just what the AI needs to fill it."""
    summary: Dict[str, Any] = {
        "name": p.get("name"),
        "type": p.get("type"),
        "required": bool(p.get("required")),
        "frontendType": p.get("frontendType"),
        "description": _coerceLabel(p.get("description"), ""),
    }
    if "default" in p:
        summary["default"] = p.get("default")
    frontendOptions = p.get("frontendOptions")
    if isinstance(frontendOptions, dict):
        # Expose enum-style choices ("options") so the model sticks to allowed values.
        if isinstance(frontendOptions.get("options"), list):
            summary["allowedValues"] = frontendOptions.get("options")
    if p.get("frontendType") == "userConnection":
        summary["hint"] = (
            "Call listConnections to discover available connections; pass the "
            "connectionId here. Required before this node can run."
        )
    return summary
|
|
|
|
|
|
async def _listAvailableNodeTypes(params: Dict[str, Any], context: Any) -> ToolResult:
    """List all available node types for the flow builder (compact catalog).

    Returns ``id``, ``category``, ``label``, short ``description``, and the
    parameter counts. To learn HOW to fill a node's parameters use
    ``describeNodeType(nodeType=...)`` — that returns the full schema.
    """
    name = "listAvailableNodeTypes"
    try:
        from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
        nodeTypes = [
            _summarizeNodeForCatalog(entry)
            for entry in STATIC_NODE_TYPES
            if isinstance(entry, dict)
        ]
        return _ok(name, {"nodeTypes": nodeTypes, "count": len(nodeTypes)})
    except Exception as e:
        logger.exception("listAvailableNodeTypes failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _describeNodeType(params: Dict[str, Any], context: Any) -> ToolResult:
    """Return the full schema for a single node type so the AI can fill
    ``addNode.parameters`` correctly (which fields are required, what types,
    default values, allowed enum values, what each port expects/produces).

    This is the canonical way to discover required parameters before
    calling ``addNode`` — without it the model guesses ``parameters={}``
    and the user gets empty configuration cards.
    """
    name = "describeNodeType"
    try:
        nodeType = params.get("nodeType") or params.get("id")
        if not nodeType:
            return _err(name, "nodeType required")
        from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
        target: Dict[str, Any] = next(
            (n for n in STATIC_NODE_TYPES if isinstance(n, dict) and n.get("id") == nodeType),
            {},
        )
        if not target:
            return _err(name, f"Unknown nodeType '{nodeType}' — call listAvailableNodeTypes first")

        parameters = [
            _summarizeParameter(p)
            for p in (target.get("parameters") or [])
            if isinstance(p, dict)
        ]

        def _portList(portsDict: Any) -> List[Dict[str, Any]]:
            # Normalize a {index: spec} mapping into a list sorted by numeric index.
            if not isinstance(portsDict, dict):
                return []

            def _indexKey(item: Any) -> int:
                raw = str(item[0])
                return int(raw) if raw.isdigit() else 0

            ports: List[Dict[str, Any]] = []
            for idx, spec in sorted(portsDict.items(), key=_indexKey):
                if not isinstance(spec, dict):
                    continue
                entry: Dict[str, Any] = {"index": int(idx) if str(idx).isdigit() else idx}
                for field in ("schema", "accepts"):
                    if field in spec:
                        entry[field] = spec.get(field)
                ports.append(entry)
            return ports

        meta = target.get("meta") or {}
        return _ok(name, {
            "id": target.get("id"),
            "category": target.get("category"),
            "label": _coerceLabel(target.get("label"), target.get("id") or ""),
            "description": _coerceLabel(target.get("description"), ""),
            "usesAi": bool(meta.get("usesAi")),
            "inputs": int(target.get("inputs") or 0),
            "outputs": int(target.get("outputs") or 0),
            "inputPorts": _portList(target.get("inputPorts")),
            "outputPorts": _portList(target.get("outputPorts")),
            "parameters": parameters,
            "requiredParameters": [p["name"] for p in parameters if p.get("required")],
        })
    except Exception as e:
        logger.exception("describeNodeType failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
# Geometry constants — MUST match the frontend (FlowCanvas.tsx) so the
|
|
# server-side auto-layout produces the exact same coordinates the user
|
|
# would get by clicking "Arrange" in the UI.
|
|
_NODE_WIDTH = 200
|
|
_NODE_HEIGHT = 72
|
|
_LAYOUT_V_GAP = 80
|
|
_LAYOUT_H_GAP = 60
|
|
_LAYOUT_START_X = 40
|
|
_LAYOUT_START_Y = 40
|
|
|
|
|
|
def _computeAutoLayout(
|
|
nodes: List[Dict[str, Any]],
|
|
connections: List[Dict[str, Any]],
|
|
) -> List[Dict[str, Any]]:
|
|
"""Topological-layer layout — port of ``computeAutoLayout`` in FlowCanvas.tsx.
|
|
|
|
Arranges nodes top-to-bottom in layers (one layer per BFS step from the
|
|
sources). Disconnected nodes are appended as extra single-node layers,
|
|
same as the frontend. Returns a NEW node list with updated top-level
|
|
``x``/``y``; legacy ``position`` keys are stripped to avoid two
|
|
competing sources of truth.
|
|
"""
|
|
if not nodes:
|
|
return nodes
|
|
|
|
nodeIds = {n.get("id") for n in nodes if n.get("id")}
|
|
inDegree: Dict[str, int] = {nid: 0 for nid in nodeIds if nid}
|
|
children: Dict[str, List[str]] = {nid: [] for nid in nodeIds if nid}
|
|
|
|
for c in connections or []:
|
|
src = c.get("source")
|
|
tgt = c.get("target")
|
|
if src in inDegree and tgt in inDegree:
|
|
inDegree[tgt] = inDegree[tgt] + 1
|
|
children[src].append(tgt)
|
|
|
|
layers: List[List[str]] = []
|
|
layerOf: Dict[str, int] = {}
|
|
queue: List[str] = [nid for nid, deg in inDegree.items() if deg == 0]
|
|
|
|
while queue:
|
|
batch = list(queue)
|
|
queue = []
|
|
layerIdx = len(layers)
|
|
layers.append(batch)
|
|
for nid in batch:
|
|
layerOf[nid] = layerIdx
|
|
for childId in children.get(nid, []):
|
|
inDegree[childId] = inDegree[childId] - 1
|
|
if inDegree[childId] == 0:
|
|
queue.append(childId)
|
|
|
|
# Cycles: append remaining nodes as their own layers (matches frontend).
|
|
for n in nodes:
|
|
nid = n.get("id")
|
|
if nid and nid not in layerOf:
|
|
layerIdx = len(layers)
|
|
layers.append([nid])
|
|
layerOf[nid] = layerIdx
|
|
|
|
out: List[Dict[str, Any]] = []
|
|
for n in nodes:
|
|
nid = n.get("id")
|
|
layer = layerOf.get(nid, 0) if nid else 0
|
|
siblings = layers[layer] if 0 <= layer < len(layers) else [nid]
|
|
idxInLayer = siblings.index(nid) if nid in siblings else 0
|
|
new = dict(n)
|
|
new["x"] = _LAYOUT_START_X + idxInLayer * (_NODE_WIDTH + _LAYOUT_H_GAP)
|
|
new["y"] = _LAYOUT_START_Y + layer * (_NODE_HEIGHT + _LAYOUT_V_GAP)
|
|
# Strip legacy ``position`` so frontend never sees two coordinates.
|
|
new.pop("position", None)
|
|
out.append(new)
|
|
return out
|
|
|
|
|
|
async def _autoLayoutWorkflow(params: Dict[str, Any], context: Any) -> ToolResult:
    """Re-arrange all nodes of the workflow into a clean top-down layered layout.

    Same algorithm as the editor's "Arrange" button — call this after you
    finished adding/connecting nodes so the user doesn't see an unreadable
    pile of overlapping boxes.
    """
    name = "autoLayoutWorkflow"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required (and not present in agent context)")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = dict(wf.get("graph", {}) or {})
        nodes = list(graph.get("nodes", []) or [])
        connections = list(graph.get("connections", []) or [])
        if not nodes:
            return _ok(name, {"message": "No nodes to layout", "nodeCount": 0})

        graph["nodes"] = _computeAutoLayout(nodes, connections)
        iface.updateWorkflow(workflowId, {"graph": graph})

        # Derive the layer count from the deepest y coordinate produced.
        deepestY = max((node.get("y", 0) for node in graph["nodes"]), default=_LAYOUT_START_Y)
        return _ok(name, {
            "message": f"Auto-layout applied to {len(nodes)} nodes",
            "nodeCount": len(nodes),
            "layerCount": deepestY // (_NODE_HEIGHT + _LAYOUT_V_GAP) + 1,
        })
    except Exception as e:
        logger.exception("autoLayoutWorkflow failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _validateGraph(params: Dict[str, Any], context: Any) -> ToolResult:
    """Validate a workflow graph for common issues.

    Checks: empty graph, missing trigger node, dangling connection
    endpoints, and orphan (unconnected, non-trigger) nodes. Returns
    ``valid`` plus a human-readable ``issues`` list.
    """
    name = "validateGraph"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")

        iface = _getInterface(context, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return _err(name, f"Workflow {workflowId} not found")

        graph = wf.get("graph", {}) or {}
        nodes = graph.get("nodes", []) or []
        connections = graph.get("connections", []) or []
        issues: List[str] = []

        nodeIds = {node.get("id") for node in nodes}
        if not nodes:
            issues.append("Graph has no nodes")

        if not any((node.get("type") or "").startswith("trigger.") for node in nodes):
            issues.append("No trigger node found")

        for conn in connections:
            if conn.get("source") not in nodeIds:
                issues.append(f"Connection source '{conn.get('source')}' not found")
            if conn.get("target") not in nodeIds:
                issues.append(f"Connection target '{conn.get('target')}' not found")

        connectedNodes = set()
        for conn in connections:
            connectedNodes.update((conn.get("source"), conn.get("target")))
        orphans = [
            node.get("id") for node in nodes
            if node.get("id") not in connectedNodes and not (node.get("type") or "").startswith("trigger.")
        ]
        if orphans:
            issues.append(f"Orphan nodes (not connected): {', '.join(orphans)}")

        return _ok(name, {
            "valid": len(issues) == 0,
            "issues": issues,
            "nodeCount": len(nodes),
            "connectionCount": len(connections),
        })
    except Exception as e:
        logger.exception("validateGraph failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _listWorkflowHistory(params: Dict[str, Any], context: Any) -> ToolResult:
    """List versions (history) for a workflow."""
    name = "listWorkflowHistory"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")
        iface = _getInterface(context, instanceId)
        versionSummaries = []
        for version in iface.getVersions(workflowId) or []:
            versionSummaries.append({
                "id": version.get("id"),
                "versionNumber": version.get("versionNumber"),
                "status": version.get("status"),
                "publishedAt": version.get("publishedAt"),
                "publishedBy": version.get("publishedBy"),
            })
        return _ok(name, {"workflowId": workflowId, "versions": versionSummaries})
    except Exception as e:
        logger.exception("listWorkflowHistory failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _readWorkflowMessages(params: Dict[str, Any], context: Any) -> ToolResult:
    """Read recent run logs/messages for a workflow.

    Returns the 10 most recent AutoRun records (newest first) with status,
    timestamps, and error text.
    """
    name = "readWorkflowMessages"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")
        iface = _getInterface(context, instanceId)
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoRun
        runs = iface.db.getRecordset(AutoRun, recordFilter={"workflowId": workflowId}) or []
        recent = sorted(runs, key=lambda run: run.get("startedAt") or 0, reverse=True)[:10]
        runSummaries = [
            {
                "runId": run.get("id"),
                "status": run.get("status"),
                "startedAt": run.get("startedAt"),
                "completedAt": run.get("completedAt"),
                "error": run.get("error"),
            }
            for run in recent
        ]
        return _ok(name, {"workflowId": workflowId, "recentRuns": runSummaries})
    except Exception as e:
        logger.exception("readWorkflowMessages failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
# -------------------------------------------------------------------------
# Full-CRUD tools — create / load-from-file / export-to-file / delete
# (Phase 3 of 2026-04-pwg-pilot-mietzinsbestaetigung-workflow.md)
# -------------------------------------------------------------------------
|
|
|
|
|
|
async def _updateWorkflowMetadata(params: Dict[str, Any], context: Any) -> ToolResult:
    """Update workflow metadata (label / description / tags / active flag).

    This is the **rename / re-tag / activate** tool. It does NOT touch the
    graph or invocations. Use this whenever the user wants to:
    - rename the workflow ("nenne den workflow um", "rename to X")
    - change description
    - add/remove tags
    - toggle active

    NEVER use ``removeNode`` / ``deleteWorkflow`` for a rename — they are
    destructive and irreversible. If only ``label`` is provided, only the
    label is changed.
    """
    name = "updateWorkflowMetadata"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")
        # Only explicitly supplied, non-None fields are written.
        patch: Dict[str, Any] = {
            key: params[key]
            for key in ("label", "description", "tags", "active")
            if key in params and params[key] is not None
        }
        if not patch:
            return _err(name, "at least one of label/description/tags/active must be provided")
        if "label" in patch:
            strippedLabel = (patch["label"] or "").strip()
            if not strippedLabel:
                return _err(name, "label, if provided, must be a non-empty string")
            patch["label"] = strippedLabel
        iface = _getInterface(context, instanceId)
        updated = iface.updateWorkflow(workflowId, patch)
        if updated is None:
            return _err(name, f"Workflow {workflowId} not found")
        changedFields = sorted(patch)
        return _ok(name, {
            "workflowId": updated.get("id"),
            "label": updated.get("label"),
            "active": updated.get("active"),
            "changed": changedFields,
            "message": f"Workflow metadata updated ({', '.join(changedFields)}).",
        })
    except Exception as e:
        logger.exception("updateWorkflowMetadata failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _createWorkflow(params: Dict[str, Any], context: Any) -> ToolResult:
    """Create a new (empty) workflow in the current feature instance.

    The newly created workflow is returned so subsequent ``addNode``/
    ``connectNodes`` calls can target it via ``workflowId`` (or via the
    agent's auto-injected context once the editor switches to it).
    """
    name = "createWorkflow"
    try:
        _, instanceId = _resolveIds(params, context)
        if not instanceId:
            return _err(name, "instanceId required (and not present in agent context)")
        label = (params.get("label") or "").strip()
        if not label:
            return _err(name, "label required")

        iface = _getInterface(context, instanceId)
        created = iface.createWorkflow({
            "label": label,
            "description": params.get("description") or "",
            "tags": params.get("tags") or [],
            "graph": params.get("graph") or {"nodes": [], "connections": []},
            "invocations": params.get("invocations") or [],
            # Never auto-activate an AI-created workflow.
            "active": False,
        })
        return _ok(name, {
            "workflowId": created.get("id"),
            "label": created.get("label"),
            "message": f"Workflow '{label}' created (active=false; activate via UI when ready).",
        })
    except Exception as e:
        logger.exception("createWorkflow failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _createWorkflowFromFile(params: Dict[str, Any], context: Any) -> ToolResult:
    """Import a workflow from a UDB-uploaded ``.workflow.json`` envelope.

    Accepts either ``fileId`` (preferred — re-uses uploaded file from the
    Unified-Data-Bar) or ``envelope`` (inline dict, useful for tests). Always
    creates a new workflow with ``active=false``.
    """
    name = "createWorkflowFromFile"
    try:
        _, instanceId = _resolveIds(params, context)
        if not instanceId:
            return _err(name, "instanceId required")
        fileId = params.get("fileId")
        envelope = params.get("envelope")
        if not (fileId or envelope):
            return _err(name, "either fileId or envelope required")

        # Inline envelope wins; otherwise fetch the uploaded file.
        if fileId and not envelope:
            envelope = _loadEnvelopeFromUdb(fileId, context)
            if envelope is None:
                return _err(name, f"Could not read workflow file {fileId}")

        iface = _getInterface(context, instanceId)
        try:
            result = iface.importWorkflowFromDict(envelope, existingWorkflowId=params.get("existingWorkflowId"))
        except Exception as exc:
            return _err(name, f"Import failed: {exc}")
        wf = result.get("workflow") or {}
        verb = "created" if result.get("created") else "updated"
        return _ok(name, {
            "workflowId": wf.get("id"),
            "label": wf.get("label"),
            "created": result.get("created"),
            "warnings": result.get("warnings") or [],
            "message": f"Workflow '{wf.get('label')}' {verb} from file (active=false).",
        })
    except Exception as e:
        logger.exception("createWorkflowFromFile failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _exportWorkflowToFile(params: Dict[str, Any], context: Any) -> ToolResult:
    """Serialize a workflow into a portable, versioned envelope.

    The returned payload contains the canonical envelope dict and a
    suggested filename, so the agent can offer the user a download link
    or re-upload the file to the UDB.
    """
    name = "exportWorkflowToFile"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")

        iface = _getInterface(context, instanceId)
        envelope = iface.exportWorkflowToDict(workflowId)
        if envelope is None:
            return _err(name, f"Workflow {workflowId} not found")

        # Imported lazily — presumably to avoid a module-load dependency
        # on the editor feature package (matches the original placement).
        from modules.features.graphicalEditor._workflowFileSchema import buildFileName

        payload = {
            "fileName": buildFileName(envelope.get("label", "workflow")),
            "envelope": envelope,
            "schemaVersion": envelope.get("$schemaVersion"),
        }
        return _ok(name, payload)
    except Exception as e:
        logger.exception("exportWorkflowToFile failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
async def _deleteWorkflow(params: Dict[str, Any], context: Any) -> ToolResult:
    """Permanently remove a workflow.

    The explicit ``confirm`` flag must be truthy; this guards against an
    over-eager agent deleting data without user approval.
    """
    name = "deleteWorkflow"
    try:
        workflowId, instanceId = _resolveIds(params, context)
        if not (workflowId and instanceId):
            return _err(name, "workflowId and instanceId required")
        if not params.get("confirm"):
            return _err(name, "confirm=true required (deletion is permanent)")

        iface = _getInterface(context, instanceId)
        deleted = iface.deleteWorkflow(workflowId)
        if deleted:
            return _ok(name, {"workflowId": workflowId, "message": "Workflow deleted."})
        return _err(name, f"Workflow {workflowId} not found")
    except Exception as e:
        logger.exception("deleteWorkflow failed: %s", e)
        return _err(name, str(e))
|
|
|
|
|
|
def _loadEnvelopeFromUdb(fileId: str, context: Any) -> Any:
    """Load and JSON-parse a workflow file from the Unified-Data-Bar.

    Uses the module-level ``json`` import (the previous function-local
    ``import json`` redundantly shadowed it and has been removed).

    Args:
        fileId: FileItem id of the uploaded ``.workflow.json`` file.
        context: Agent context used to resolve user and mandate.

    Returns:
        The parsed envelope (normally a dict) or ``None`` if the file
        cannot be read or is not valid JSON — the caller turns ``None``
        into a tool error message.
    """
    try:
        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        user = _resolveUser(context)
        mandateId = _resolveMandateId(context)
        mgmt = interfaceDbManagement.getInterface(user, mandateId)
        rawBytes = mgmt.getFileData(fileId)
    except Exception as exc:
        # Any read failure is downgraded to a warning + None: the tool
        # layer reports a clean error message instead of a stack trace.
        logger.warning("workflowTools: cannot read UDB file %s: %s", fileId, exc)
        return None
    if not rawBytes:
        return None
    try:
        # UDB may hand back bytes or an already-decoded string.
        text = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else str(rawBytes)
        return json.loads(text)
    except Exception as exc:
        logger.warning("workflowTools: file %s is not valid JSON: %s", fileId, exc)
        return None
|
|
|
|
|
|
def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
    """Return tool definitions for registration in the ToolRegistry.

    Each entry carries: ``name``, ``handler`` (async callable), a model-facing
    ``description``, a JSON-Schema ``parameters`` object, optional
    ``readOnly`` flag, and the shared ``toolSet`` id.

    Note: ``workflowId`` and ``instanceId`` are NOT marked ``required`` —
    they are auto-injected from the agent context by ``_resolveIds``. The
    model may still pass them explicitly (e.g. to target a different
    workflow) but doesn't have to repeat them on every call.
    """
    # Shared id-parameter schema fragment, spread into most tools below.
    _idFields = {
        "workflowId": {"type": "string", "description": "Workflow ID (defaults to the current editor workflow)"},
        "instanceId": {"type": "string", "description": "Feature instance ID (defaults to the current editor instance)"},
    }
    return [
        # --- Graph inspection -------------------------------------------
        {
            "name": "readWorkflowGraph",
            "handler": _readWorkflowGraph,
            "description": "Read the current workflow graph (nodes and connections). Always call this first to understand the current state before making changes.",
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # --- Graph mutation ---------------------------------------------
        {
            "name": "addNode",
            "handler": _addNode,
            "description": "Add a node to the workflow graph.",
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "nodeType": {"type": "string", "description": "Node type id (e.g. ai.chat, email.send) — use listAvailableNodeTypes to discover"},
                    "title": {"type": "string", "description": "Human-readable title"},
                    "parameters": {"type": "object", "description": "Node parameters"},
                    "position": {"type": "object", "description": "Canvas position {x, y}"},
                    "nodeId": {"type": "string", "description": "Optional explicit node id"},
                },
                "required": ["nodeType"],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "removeNode",
            "handler": _removeNode,
            # Destructive single-node removal; description steers the model
            # away from misusing it for renames or full rebuilds.
            "description": (
                "Remove a SINGLE node and its connections from the graph. "
                "DESTRUCTIVE — only call when the user explicitly asks to "
                "delete that specific node. NEVER use this to 'rename' or "
                "'rebuild' a workflow — for renaming use updateWorkflowMetadata; "
                "for replacing the whole graph use createWorkflowFromFile with "
                "existingWorkflowId. NEVER batch-remove all nodes."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "nodeId": {"type": "string", "description": "ID of the node to remove"},
                },
                "required": ["nodeId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "connectNodes",
            "handler": _connectNodes,
            "description": "Connect two nodes in the graph (source -> target).",
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "sourceId": {"type": "string"},
                    "targetId": {"type": "string"},
                    "sourceOutput": {"type": "integer", "default": 0},
                    "targetInput": {"type": "integer", "default": 0},
                },
                "required": ["sourceId", "targetId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "setNodeParameter",
            "handler": _setNodeParameter,
            "description": "Set a single parameter on a node.",
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "nodeId": {"type": "string"},
                    "parameterName": {"type": "string"},
                    # No "type" key: the value may be of any JSON type.
                    "parameterValue": {"description": "Value to set (any type)"},
                },
                "required": ["nodeId", "parameterName", "parameterValue"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # --- DataRef binding --------------------------------------------
        {
            "name": "listUpstreamPaths",
            "handler": _list_upstream_paths,
            "description": (
                "List pickable upstream paths for binding DataRefs on a node. "
                "Call after readWorkflowGraph; use with bindNodeParameter instead of relying on implicit wiring."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "nodeId": {"type": "string", "description": "Target node id (the node whose parameters you will bind)"},
                },
                "required": ["nodeId"],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "bindNodeParameter",
            "handler": _bind_node_parameter,
            "description": (
                "Bind a parameter to an upstream output using an explicit DataRef "
                "(producerNodeId + path). Prefer listUpstreamPaths to discover valid paths."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "nodeId": {"type": "string"},
                    "parameterName": {"type": "string"},
                    "producerNodeId": {"type": "string", "description": "Upstream node id (port 0 producer)"},
                    # "items": {} — path segments may be strings or indices.
                    "path": {"type": "array", "items": {}, "description": "JSON path segments, e.g. [\"documents\"] or [\"id\"]"},
                    "expectedType": {"type": "string", "description": "Optional type hint stored on the ref"},
                },
                "required": ["nodeId", "parameterName", "producerNodeId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # --- Node-type catalog ------------------------------------------
        {
            "name": "listAvailableNodeTypes",
            "handler": _listAvailableNodeTypes,
            "description": (
                "List all available node types (compact catalog: id, label, "
                "description, paramCount, requiredParamCount, usesAi). Call this "
                "once to discover ids; then call describeNodeType for each type "
                "you intend to add to learn the parameter schema."
            ),
            "parameters": {"type": "object", "properties": {}},
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "describeNodeType",
            "handler": _describeNodeType,
            "description": (
                "Return the FULL parameter schema for a single node type "
                "(name, type, required, default, allowedValues, description) "
                "plus input/output ports. ALWAYS call this before addNode for "
                "any node type that has requiredParamCount > 0, and pass all "
                "required parameters into addNode — otherwise the user sees an "
                "empty configuration card. For parameters with "
                "frontendType='userConnection' call listConnections to obtain "
                "a connectionId."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "nodeType": {"type": "string", "description": "Node type id from listAvailableNodeTypes (e.g. 'email.checkEmail', 'ai.prompt')"},
                },
                "required": ["nodeType"],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # --- Layout & validation ----------------------------------------
        {
            "name": "autoLayoutWorkflow",
            "handler": _autoLayoutWorkflow,
            "description": (
                "Re-arrange ALL nodes into a clean top-down layered layout "
                "(same algorithm as the editor's 'Arrange' button). Call this "
                "AFTER you finished adding nodes and connections, otherwise the "
                "user sees a pile of overlapping boxes. Idempotent — safe to "
                "call multiple times."
            ),
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "validateGraph",
            "handler": _validateGraph,
            "description": "Validate a workflow graph for common issues (missing trigger, dangling connections, orphans).",
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # --- History & diagnostics --------------------------------------
        {
            "name": "listWorkflowHistory",
            "handler": _listWorkflowHistory,
            "description": "List version history for a workflow (AutoVersion entries).",
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "readWorkflowMessages",
            "handler": _readWorkflowMessages,
            "description": "Read recent run logs and status for a workflow.",
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # --- Workflow lifecycle -----------------------------------------
        {
            "name": "updateWorkflowMetadata",
            "handler": _updateWorkflowMetadata,
            "description": (
                "Rename / re-tag / (de)activate an EXISTING workflow. This is "
                "the ONLY correct way to rename a workflow — DO NOT delete and "
                "recreate, and NEVER call removeNode for a rename. Provide any "
                "subset of label/description/tags/active; omitted fields stay "
                "unchanged. Graph and invocations are not affected."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "label": {"type": "string", "description": "New workflow label (rename target)"},
                    "description": {"type": "string", "description": "New description"},
                    "tags": {"type": "array", "items": {"type": "string"}, "description": "New tag list (replaces existing)"},
                    "active": {"type": "boolean", "description": "Activate (true) or deactivate (false) the workflow"},
                },
                "required": [],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "createWorkflow",
            "handler": _createWorkflow,
            "description": (
                "Create a NEW empty workflow in the current feature instance. "
                "The workflow is created with active=false; the user activates "
                "it from the editor. Use this when the user wants to start "
                "building a new automation from scratch."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    # No workflowId here — a new workflow has no id yet.
                    "instanceId": _idFields["instanceId"],
                    "label": {"type": "string", "description": "Workflow label (required, shown in the editor list)"},
                    "description": {"type": "string", "description": "Optional description"},
                    "tags": {"type": "array", "items": {"type": "string"}, "description": "Optional tags"},
                    "graph": {"type": "object", "description": "Optional initial graph {nodes, connections} (defaults to empty)"},
                    "invocations": {"type": "array", "description": "Optional invocation triggers"},
                },
                "required": ["label"],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "createWorkflowFromFile",
            "handler": _createWorkflowFromFile,
            "description": (
                "Import a workflow from a previously uploaded .workflow.json "
                "envelope (Unified-Data-Bar). Pass the UDB ``fileId``; the file "
                "is parsed, validated against the envelopeVersioned schema and "
                "saved as a new workflow with active=false. Use ``existingWorkflowId`` "
                "to overwrite an existing workflow instead of creating a new one."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "instanceId": _idFields["instanceId"],
                    "fileId": {"type": "string", "description": "FileItem.id of the uploaded .workflow.json (preferred)"},
                    "envelope": {"type": "object", "description": "Inline envelope dict (alternative to fileId, mainly for tests)"},
                    "existingWorkflowId": {"type": "string", "description": "Optional — overwrite this workflow instead of creating a new one"},
                },
                # fileId/envelope are mutually alternative; the handler
                # enforces "at least one" since JSON Schema required=[] can't.
                "required": [],
            },
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "exportWorkflowToFile",
            "handler": _exportWorkflowToFile,
            "description": (
                "Export the current workflow as a portable envelopeVersioned "
                "JSON dict and a suggested filename. The agent can then offer "
                "the user a download or re-upload to UDB. Persistence-bound "
                "fields (timestamps, mandate ids) are stripped automatically."
            ),
            "parameters": {
                "type": "object",
                "properties": {**_idFields},
                "required": [],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        {
            "name": "deleteWorkflow",
            "handler": _deleteWorkflow,
            "description": (
                "Permanently delete a workflow. Requires ``confirm=true`` to "
                "execute — this is a destructive operation. Always confirm "
                "with the user in chat BEFORE calling this tool with confirm=true."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    **_idFields,
                    "confirm": {"type": "boolean", "description": "Must be true to actually delete"},
                },
                "required": ["confirm"],
            },
            "toolSet": TOOLBOX_ID,
        },
    ]
|