Fix AI agent parameter flow
This commit is contained in:
parent
d9fcea54ff
commit
fb3a1f0a51
8 changed files with 652 additions and 13 deletions
|
|
@ -21,6 +21,47 @@ logger = logging.getLogger(__name__)
|
|||
# No mapping needed - table name = Pydantic model name exactly
|
||||
|
||||
|
||||
class DatabaseQueryError(RuntimeError):
    """Raised by DB read methods when the underlying SQL query failed.

    Empty result sets do NOT raise this — they return ``[]`` / ``None`` /
    ``{"items": [], "totalItems": 0, "totalPages": 0}`` as before. This
    exception is reserved for **real** failures: psycopg2 ProgrammingError,
    DataError, OperationalError, IntegrityError, plus any unexpected
    Python error raised inside a query path.

    Read methods used to silently swallow such errors and return empty
    collections, which made every caller incapable of distinguishing
    "no rows" from "broken query / type adapter / dropped column / lost
    connection". That hid concrete bugs (e.g. dict passed where Postgres
    expected a UUID string) behind misleading downstream "no record found"
    errors.
    """

    def __init__(self, table: str, message: str, original: Optional[BaseException] = None):
        """Compose the message as ``"<table>: <message>"``.

        Args:
            table: Name of the SQL table the failed query targeted.
            message: Human-readable description of the failure.
            original: Underlying driver exception, if any. (Annotated as
                ``Optional[...]`` — the previous ``BaseException = None``
                was an implicit-Optional default, disallowed by PEP 484.)
        """
        super().__init__(f"{table}: {message}")
        self.table = table          # which table's query failed
        self.original = original    # root-cause exception, for inspection
|
||||
|
||||
|
||||
def _rollbackQuietly(connection) -> None:
|
||||
"""Restore the connection state after a failed query.
|
||||
|
||||
Postgres puts the connection in an error state after any failed
|
||||
statement; subsequent queries on the same connection raise
|
||||
``InFailedSqlTransaction`` until we rollback. We swallow rollback
|
||||
errors because the original query error is what the caller should
|
||||
see — a secondary rollback failure typically means the connection
|
||||
is gone and will be reopened on the next ``_ensure_connection``.
|
||||
"""
|
||||
if connection is None:
|
||||
return
|
||||
try:
|
||||
connection.rollback()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
class SystemTable(PowerOnModel):
|
||||
"""Data model for system table entries"""
|
||||
|
||||
|
|
@ -762,7 +803,8 @@ class DatabaseConnector:
|
|||
return record
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading record {recordId} from table {table}: {e}")
|
||||
return None
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def getRecord(self, model_class: type, recordId: str) -> Optional[Dict[str, Any]]:
|
||||
"""Load one row by primary key (routes / services; wraps _loadRecord)."""
|
||||
|
|
@ -848,7 +890,8 @@ class DatabaseConnector:
|
|||
return records
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading table {table}: {e}")
|
||||
return []
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def _registerInitialId(self, table: str, initialId: str) -> bool:
|
||||
"""Registers the initial ID for a table."""
|
||||
|
|
@ -1047,7 +1090,8 @@ class DatabaseConnector:
|
|||
return records
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading records from table {table}: {e}")
|
||||
return []
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def _buildPaginationClauses(
|
||||
self,
|
||||
|
|
@ -1270,7 +1314,8 @@ class DatabaseConnector:
|
|||
return {"items": records, "totalItems": totalItems, "totalPages": totalPages}
|
||||
except Exception as e:
|
||||
logger.error(f"Error in getRecordsetPaginated for table {table}: {e}")
|
||||
return {"items": [], "totalItems": 0, "totalPages": 0}
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def getDistinctColumnValues(
|
||||
self,
|
||||
|
|
@ -1332,7 +1377,8 @@ class DatabaseConnector:
|
|||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Error in getDistinctColumnValues for {table}.{column}: {e}")
|
||||
return []
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def recordCreate(
|
||||
self, model_class: type, record: Union[Dict[str, Any], BaseModel]
|
||||
|
|
@ -1710,7 +1756,8 @@ class DatabaseConnector:
|
|||
return records
|
||||
except Exception as e:
|
||||
logger.error(f"Error in semantic search on {table}: {e}")
|
||||
return []
|
||||
_rollbackQuietly(getattr(self, "connection", None))
|
||||
raise DatabaseQueryError(table, str(e), original=e) from e
|
||||
|
||||
def close(self, forceClose: bool = False):
|
||||
"""Close the database connection.
|
||||
|
|
|
|||
|
|
@ -187,7 +187,15 @@ def _catalogTypeToJsonSchema(typeStr: str, _depth: int = 0) -> Dict[str, Any]:
|
|||
|
||||
|
||||
def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
|
||||
"""Create an async handler that dispatches to the ActionExecutor."""
|
||||
"""Create an async handler that dispatches to the ActionExecutor.
|
||||
|
||||
Parameter validation and Ref-payload normalization (collapsing
|
||||
``{id: ..., featureCode: ...}`` from the agent's typed tool schema to the
|
||||
bare UUID expected by action implementations) happen centrally inside
|
||||
``ActionExecutor.executeAction`` via ``parameterValidation``. This keeps
|
||||
a single source of truth for the action parameter contract regardless
|
||||
of caller (agent, workflow graph, REST route).
|
||||
"""
|
||||
async def _handler(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
|
||||
try:
|
||||
if context:
|
||||
|
|
|
|||
|
|
@ -9,6 +9,9 @@ from modules.datamodels.datamodelChat import ActionResult, ActionItem, TaskStep
|
|||
from modules.datamodels.datamodelChat import ChatWorkflow
|
||||
from modules.workflows.processing.shared.methodDiscovery import methods
|
||||
from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
|
||||
from modules.workflows.processing.shared.parameterValidation import (
|
||||
InvalidActionParameterError, validateAndCoerceParameters,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -20,7 +23,13 @@ class ActionExecutor:
|
|||
|
||||
|
||||
async def executeAction(self, methodName: str, actionName: str, parameters: Dict[str, Any]) -> ActionResult:
|
||||
"""Execute a method action"""
|
||||
"""Execute a method action with validated/coerced parameters.
|
||||
|
||||
Parameter validation is centralised here so the contract holds for
|
||||
every execution path (agent tool calls, workflow graph nodes,
|
||||
REST routes) — actions can rely on declared types without
|
||||
defensive isinstance branches.
|
||||
"""
|
||||
try:
|
||||
if methodName not in methods:
|
||||
raise ValueError(f"Unknown method: {methodName}")
|
||||
|
|
@ -31,9 +40,15 @@ class ActionExecutor:
|
|||
|
||||
action = method['actions'][actionName]
|
||||
|
||||
# Execute the action
|
||||
actionDef = method['instance']._actions.get(actionName)
|
||||
if actionDef is not None:
|
||||
parameters = validateAndCoerceParameters(actionDef, parameters or {})
|
||||
|
||||
return await action['method'](parameters)
|
||||
|
||||
except InvalidActionParameterError as e:
|
||||
logger.error(f"Invalid parameters for {methodName}.{actionName}: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing method {methodName}.{actionName}: {str(e)}")
|
||||
raise
|
||||
|
|
|
|||
198
modules/workflows/processing/shared/parameterValidation.py
Normal file
198
modules/workflows/processing/shared/parameterValidation.py
Normal file
|
|
@ -0,0 +1,198 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Universal parameter validation + coercion for workflow actions.
|
||||
|
||||
Workflow actions historically received their ``parameters`` as a raw
|
||||
``Dict[str, Any]`` with no enforcement of the declared parameter schema.
|
||||
That implicit contract masked two whole classes of bugs:
|
||||
|
||||
1. **Type confusion at the agent boundary.** The agent's tool schema
|
||||
(Phase-3 Typed Action Architecture) exposes ``FeatureInstanceRef`` /
|
||||
``ConnectionRef`` etc. as typed *objects* with ``id`` plus a
|
||||
discriminator (``featureCode`` / ``authority``) so the LLM can pick
|
||||
the right instance among several. The action implementations, however,
|
||||
use the value as a bare UUID string in ``recordFilter={"col": <value>}``.
|
||||
Without normalization Postgres fails with "can't adapt type 'dict'",
|
||||
the connector's previous swallow-and-return-[] hid the failure, and the
|
||||
action returned the misleading "no record found" error.
|
||||
|
||||
2. **Unchecked optional flags.** ``forceRefresh`` arriving as the string
|
||||
``"true"`` instead of a real bool, ``periodMonth`` arriving as ``"12"``
|
||||
instead of ``12``, etc. Every action grew its own ad-hoc coercion code.
|
||||
|
||||
This module centralises validation and coercion at exactly one boundary:
|
||||
``ActionExecutor.executeAction``. By the time the action body runs, the
|
||||
``parameters`` dict is guaranteed to satisfy the declared schema.
|
||||
|
||||
Unknown extra keys (e.g. ``parentOperationId`` injected by the executor,
|
||||
``expectedDocumentFormats`` from action items) are passed through
|
||||
untouched — the schema only constrains *declared* parameters.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InvalidActionParameterError(ValueError):
    """A declared action parameter is missing, malformed, or uncoercible.

    Carries the action id and parameter name so logs and agent feedback
    point at the exact offending call site instead of surfacing an opaque
    downstream error such as "no record found" or "can't adapt type 'X'".
    """

    def __init__(self, actionId: str, paramName: str, reason: str):
        # Keep the pieces as attributes for structured handling, and bake
        # them into the message for plain-text logs.
        self.actionId = actionId
        self.paramName = paramName
        self.reason = reason
        super().__init__(f"{actionId}.{paramName}: {reason}")
|
||||
|
||||
|
||||
# Case-insensitive string spellings accepted by _coercePrimitive for bool.
_TRUE_STRINGS = {"true", "1", "yes", "on"}
# Empty string counts as False so blank template/form values coerce cleanly.
_FALSE_STRINGS = {"false", "0", "no", "off", ""}
|
||||
|
||||
|
||||
def _isRefSchema(typeStr: str) -> bool:
|
||||
"""A declared type is a Ref-Schema iff its name ends with ``Ref`` AND it
|
||||
resolves to a PORT_TYPE_CATALOG schema with an ``id`` field.
|
||||
|
||||
The catalog is imported lazily to keep this module light at startup.
|
||||
"""
|
||||
if not typeStr or not typeStr.endswith("Ref"):
|
||||
return False
|
||||
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
|
||||
schema = PORT_TYPE_CATALOG.get(typeStr)
|
||||
if schema is None:
|
||||
return False
|
||||
return any(f.name == "id" for f in schema.fields)
|
||||
|
||||
|
||||
def _coerceRef(actionId: str, paramName: str, value: Any) -> Optional[str]:
|
||||
"""Collapse a Ref payload to its ``id`` string.
|
||||
|
||||
Accepts:
|
||||
* already a string → returned as-is (workflow execution path),
|
||||
* dict with non-empty ``id`` field → returns the id (agent path),
|
||||
* ``None`` → returned as-is so optional Ref params stay optional.
|
||||
"""
|
||||
if value is None or isinstance(value, str):
|
||||
return value
|
||||
if isinstance(value, dict):
|
||||
refId = value.get("id")
|
||||
if isinstance(refId, str) and refId:
|
||||
return refId
|
||||
raise InvalidActionParameterError(
|
||||
actionId, paramName,
|
||||
f"Ref payload missing or empty 'id' field: {value!r}",
|
||||
)
|
||||
raise InvalidActionParameterError(
|
||||
actionId, paramName,
|
||||
f"Ref must be a string id or {{'id': ...}} dict, got {type(value).__name__}",
|
||||
)
|
||||
|
||||
|
||||
def _coercePrimitive(actionId: str, paramName: str, value: Any, typeStr: str) -> Any:
|
||||
"""Best-effort coercion of primitive types from string-form payloads.
|
||||
|
||||
The agent's JSON tool calls deliver everything as strings/numbers; the
|
||||
workflow executor passes through raw template values which are also
|
||||
often strings. Coercing here removes ad-hoc ``isinstance(x, str)``
|
||||
branches inside every action.
|
||||
"""
|
||||
if value is None:
|
||||
return None
|
||||
if typeStr == "bool":
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, str):
|
||||
lower = value.strip().lower()
|
||||
if lower in _TRUE_STRINGS:
|
||||
return True
|
||||
if lower in _FALSE_STRINGS:
|
||||
return False
|
||||
if isinstance(value, (int, float)):
|
||||
return bool(value)
|
||||
raise InvalidActionParameterError(
|
||||
actionId, paramName, f"cannot coerce {value!r} to bool",
|
||||
)
|
||||
if typeStr == "int":
|
||||
if isinstance(value, bool):
|
||||
return int(value)
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
if isinstance(value, str) and value.strip():
|
||||
try:
|
||||
return int(value.strip(), 10)
|
||||
except ValueError:
|
||||
pass
|
||||
if isinstance(value, float) and value.is_integer():
|
||||
return int(value)
|
||||
raise InvalidActionParameterError(
|
||||
actionId, paramName, f"cannot coerce {value!r} to int",
|
||||
)
|
||||
if typeStr == "float":
|
||||
if isinstance(value, (int, float)):
|
||||
return float(value)
|
||||
if isinstance(value, str) and value.strip():
|
||||
try:
|
||||
return float(value.strip())
|
||||
except ValueError:
|
||||
pass
|
||||
raise InvalidActionParameterError(
|
||||
actionId, paramName, f"cannot coerce {value!r} to float",
|
||||
)
|
||||
return value
|
||||
|
||||
|
||||
def validateAndCoerceParameters(actionDef, parameters: Dict[str, Any]) -> Dict[str, Any]:
    """Validate and coerce ``parameters`` against ``actionDef.parameters``.

    Per declared parameter:

    * missing (or explicit ``None``) + required → ``InvalidActionParameterError``
    * missing (or explicit ``None``) + optional → left untouched so the
      action applies its own default
    * Ref-Schema value (e.g. ``FeatureInstanceRef``) → collapsed to the
      bare id string; strings already pass through
    * ``bool``/``int``/``float`` value → coerced from common string forms
    * every other declared type → passed through unchanged

    Keys not declared in the schema (executor-injected context such as
    ``parentOperationId`` or ``expectedDocumentFormats``) flow through
    untouched. The caller's dict is never mutated; a fresh dict is returned.
    """
    actionId = getattr(actionDef, "actionId", None) or "<unknown.action>"
    declared = getattr(actionDef, "parameters", {}) or {}
    # Copy first so unknown keys survive and the input stays pristine.
    result: Dict[str, Any] = dict(parameters or {})

    for name, schema in declared.items():
        declaredType = getattr(schema, "type", None) or "Any"

        # Absent and explicit-None are treated identically: "not provided".
        if result.get(name) is None:
            if bool(getattr(schema, "required", False)):
                raise InvalidActionParameterError(
                    actionId, name, "required parameter missing",
                )
            continue

        value = result[name]
        if _isRefSchema(declaredType):
            result[name] = _coerceRef(actionId, name, value)
        elif declaredType in ("bool", "int", "float"):
            result[name] = _coercePrimitive(actionId, name, value, declaredType)

    return result
|
||||
0
tests/unit/connectors/__init__.py
Normal file
0
tests/unit/connectors/__init__.py
Normal file
158
tests/unit/connectors/test_connectorDbPostgre_failLoud.py
Normal file
158
tests/unit/connectors/test_connectorDbPostgre_failLoud.py
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Unit tests: PostgreSQL connector raises DatabaseQueryError on real failures.
|
||||
|
||||
Historical regression: ``getRecordset`` and friends used to swallow every
|
||||
exception (``except Exception: log; return []``), which turned every kind of
|
||||
broken query into "no rows found". That hid bugs like:
|
||||
|
||||
* dict passed where Postgres expected a UUID string ("can't adapt type 'dict'"),
|
||||
* missing/renamed columns after an incomplete schema migration,
|
||||
* dropped tables, lost connections, etc.
|
||||
|
||||
These tests pin the new contract: empty result sets still return ``[]`` /
|
||||
``None`` (normal), but any exception inside the query path propagates as
|
||||
``DatabaseQueryError`` with the table name attached. The transaction is
|
||||
rolled back so the connection is usable for subsequent queries.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
import psycopg2.errors
|
||||
|
||||
from modules.connectors.connectorDbPostgre import (
|
||||
DatabaseConnector,
|
||||
DatabaseQueryError,
|
||||
_rollbackQuietly,
|
||||
)
|
||||
|
||||
|
||||
class DummyTable:
    """Minimal model stand-in that drives the connector without a database.

    The connector derives the SQL table name from ``model_class.__name__``,
    so this class's own name is what the failure tests expect to see on the
    raised error's ``table`` attribute.
    """

    # The connector introspects Pydantic's ``model_fields``; empty suffices.
    model_fields = {}
|
||||
|
||||
|
||||
def _makeConnector(cursorBehavior):
    """Assemble a ``DatabaseConnector`` with a fully mocked connection stack.

    ``cursorBehavior`` receives the cursor mock and configures
    ``execute`` / ``fetchall`` / ``fetchone`` for the scenario under test.
    Returns ``(connector, connection, cursor)``.
    """
    cursor = MagicMock()
    # The connector uses `with connection.cursor() as cur:` — fake the
    # context-manager protocol around the cursor mock.
    ctxManager = MagicMock()
    ctxManager.__enter__ = MagicMock(return_value=cursor)
    ctxManager.__exit__ = MagicMock(return_value=False)

    conn = MagicMock()
    conn.cursor.return_value = ctxManager

    # __new__ skips __init__ so no real connection attempt happens.
    connector = DatabaseConnector.__new__(DatabaseConnector)
    connector.connection = conn
    connector._ensureTableExists = MagicMock(return_value=True)
    connector._systemTableName = "_system"

    cursorBehavior(cursor)
    return connector, conn, cursor
|
||||
|
||||
|
||||
class TestGetRecordsetFailLoud:
    """getRecordset: empty results stay ``[]``; real query errors raise."""

    def test_emptyResultStillReturnsList(self):
        """Zero rows is the ordinary happy path — no error, no rollback."""
        def configure(cursor):
            cursor.execute.return_value = None
            cursor.fetchall.return_value = []

        connector, conn, _ = _makeConnector(configure)
        assert connector.getRecordset(DummyTable) == []
        conn.rollback.assert_not_called()

    def test_dictAdaptErrorRaisesDatabaseQueryError(self):
        """Reproduces the Trustee bug: passing a dict in WHERE → can't adapt → raise."""
        def configure(cursor):
            cursor.execute.side_effect = psycopg2.ProgrammingError(
                "can't adapt type 'dict'"
            )

        connector, conn, _ = _makeConnector(configure)
        with pytest.raises(DatabaseQueryError) as excinfo:
            connector.getRecordset(
                DummyTable,
                recordFilter={"featureInstanceId": {"id": "uuid", "featureCode": "trustee"}},
            )

        err = excinfo.value
        assert err.table == "DummyTable"
        assert "can't adapt type 'dict'" in str(err)
        assert isinstance(err.original, psycopg2.ProgrammingError)
        conn.rollback.assert_called_once()

    def test_missingColumnRaisesDatabaseQueryError(self):
        """A dropped/renamed column must surface, not read as "no rows"."""
        def configure(cursor):
            cursor.execute.side_effect = psycopg2.errors.UndefinedColumn(
                'column "wat" does not exist'
            )

        connector, conn, _ = _makeConnector(configure)
        with pytest.raises(DatabaseQueryError) as excinfo:
            connector.getRecordset(DummyTable, recordFilter={"wat": "x"})

        assert "wat" in str(excinfo.value)
        conn.rollback.assert_called_once()

    def test_operationalErrorRaisesDatabaseQueryError(self):
        """Connection lost mid-query is also a real failure that must propagate."""
        def configure(cursor):
            cursor.execute.side_effect = psycopg2.OperationalError("connection lost")

        connector, conn, _ = _makeConnector(configure)
        with pytest.raises(DatabaseQueryError):
            connector.getRecordset(DummyTable)
        conn.rollback.assert_called_once()
|
||||
|
||||
|
||||
class TestGetRecordFailLoud:
    """getRecord: a missing row is ``None``; a broken query raises."""

    def test_recordNotFoundReturnsNone(self):
        """``fetchone()`` → ``None`` means "row absent", which is not an error."""
        def configure(cursor):
            cursor.execute.return_value = None
            cursor.fetchone.return_value = None

        connector, conn, _ = _makeConnector(configure)
        assert connector.getRecord(DummyTable, "missing-id") is None
        conn.rollback.assert_not_called()

    def test_queryErrorRaisesDatabaseQueryError(self):
        """An undefined relation is a real failure and must propagate."""
        def configure(cursor):
            cursor.execute.side_effect = psycopg2.errors.UndefinedTable(
                'relation "DummyTable" does not exist'
            )

        connector, conn, _ = _makeConnector(configure)
        with pytest.raises(DatabaseQueryError) as excinfo:
            connector.getRecord(DummyTable, "any-id")

        assert excinfo.value.table == "DummyTable"
        conn.rollback.assert_called_once()
|
||||
|
||||
|
||||
class TestRollbackQuietly:
    """_rollbackQuietly: best-effort rollback that never raises."""

    def test_rollsBackOnLiveConnection(self):
        conn = MagicMock()
        _rollbackQuietly(conn)
        conn.rollback.assert_called_once()

    def test_swallowsRollbackError(self):
        """Rollback failure must not mask the original query error."""
        conn = MagicMock()
        conn.rollback.side_effect = RuntimeError("rollback failed")
        _rollbackQuietly(conn)

    def test_noopOnNoneConnection(self):
        _rollbackQuietly(None)
|
||||
|
|
@ -125,3 +125,10 @@ class TestConvertParameterSchema:
|
|||
schema = _convertParameterSchema(actionParams)
|
||||
assert schema["properties"]["connection"]["type"] == "object"
|
||||
assert "id" in schema["properties"]["connection"]["properties"]
|
||||
|
||||
|
||||
# Ref-payload normalization (collapsing `{id: ..., featureCode: ...}` to the
|
||||
# bare id string) is no longer the adapter's job — it moved to the central
|
||||
# `parameterValidation.validateAndCoerceParameters` invoked by
|
||||
# `ActionExecutor.executeAction`. Tests for that contract live in
|
||||
# `tests/unit/workflows/test_parameterValidation.py`.
|
||||
|
|
|
|||
206
tests/unit/workflows/test_parameterValidation.py
Normal file
206
tests/unit/workflows/test_parameterValidation.py
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
# Copyright (c) 2026 Patrick Motsch
|
||||
# All rights reserved.
|
||||
"""Unit tests: universal action parameter validation + coercion.
|
||||
|
||||
This is the single source of truth for the action parameter contract:
|
||||
every workflow action (called via the agent, the workflow graph, or REST)
|
||||
runs through ``validateAndCoerceParameters`` before its body executes.
|
||||
|
||||
The tests pin three groups of behaviour:
|
||||
|
||||
1. **Required-parameter enforcement** — missing required params raise a
|
||||
typed ``InvalidActionParameterError`` instead of an opaque downstream
|
||||
error.
|
||||
2. **Ref-payload normalization** — the agent's typed tool schema delivers
|
||||
``FeatureInstanceRef`` as ``{id: ..., featureCode: ...}``, but actions
|
||||
expect a bare UUID string. Collapsing happens here, not in N action
|
||||
bodies.
|
||||
3. **Primitive coercion** — ``"true"``/``"12"``/``"3.14"`` from JSON-shaped
|
||||
payloads are coerced to bool/int/float, removing ad-hoc branches.
|
||||
|
||||
Unknown extra keys (e.g. ``parentOperationId``) flow through unchanged so
|
||||
the executor can keep injecting cross-cutting context.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from modules.datamodels.datamodelWorkflowActions import (
|
||||
WorkflowActionDefinition, WorkflowActionParameter,
|
||||
)
|
||||
from modules.shared.frontendTypes import FrontendType
|
||||
from modules.workflows.processing.shared.parameterValidation import (
|
||||
InvalidActionParameterError, validateAndCoerceParameters,
|
||||
)
|
||||
|
||||
|
||||
def _makeActionDef(actionId: str = "trustee.refreshAccountingData", **paramDefs) -> WorkflowActionDefinition:
    """Construct a genuine WorkflowActionDefinition from terse param specs.

    Each keyword maps a parameter name to a spec dict carrying ``type`` and
    optional ``required`` / ``description``; everything else is boilerplate
    the validator ignores.
    """
    declared = {}
    for name, spec in paramDefs.items():
        declared[name] = WorkflowActionParameter(
            name=name,
            type=spec["type"],
            frontendType=FrontendType.TEXT,
            required=spec.get("required", False),
            description=spec.get("description", ""),
        )
    return WorkflowActionDefinition(
        actionId=actionId,
        description="Test action",
        parameters=declared,
        execute=lambda *_a, **_kw: None,
    )
|
||||
|
||||
|
||||
class TestRequiredEnforcement:
    """Required parameters are enforced with a typed, pinpointing error."""

    def test_missingRequiredRaises(self):
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        with pytest.raises(InvalidActionParameterError) as excinfo:
            validateAndCoerceParameters(actionDef, {})

        err = excinfo.value
        assert err.paramName == "featureInstanceId"
        assert "required" in err.reason.lower()
        assert "trustee.refreshAccountingData.featureInstanceId" in str(err)

    def test_optionalMissingIsFine(self):
        actionDef = _makeActionDef(forceRefresh={"type": "bool", "required": False})
        assert validateAndCoerceParameters(actionDef, {}) == {}

    def test_requiredNoneCountsAsMissing(self):
        """Explicit ``None`` for a required param is missing, not "unset"."""
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        with pytest.raises(InvalidActionParameterError):
            validateAndCoerceParameters(actionDef, {"featureInstanceId": None})
|
||||
|
||||
|
||||
class TestRefNormalization:
    """Trustee bug regression: the agent passed ``{id: ..., featureCode: ...}``,
    Postgres failed with "can't adapt type 'dict'", and the old
    swallow-errors connector reported it as "no record found"."""

    def test_collapsesDictWithIdToString(self):
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        payload = {
            "id": "b7574103-f4a3-4894-8c23-74bd0d0e83a5",
            "featureCode": "trustee",
            "label": "Demo AG",
        }
        result = validateAndCoerceParameters(actionDef, {"featureInstanceId": payload})
        assert result["featureInstanceId"] == "b7574103-f4a3-4894-8c23-74bd0d0e83a5"

    def test_passThroughString(self):
        """The workflow path already sends a bare UUID string; keep it intact."""
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        uuid = "b7574103-f4a3-4894-8c23-74bd0d0e83a5"
        result = validateAndCoerceParameters(actionDef, {"featureInstanceId": uuid})
        assert result["featureInstanceId"] == uuid

    def test_dictWithoutIdRaises(self):
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        bad = {"featureCode": "trustee", "label": "Demo"}
        with pytest.raises(InvalidActionParameterError) as excinfo:
            validateAndCoerceParameters(actionDef, {"featureInstanceId": bad})
        assert "id" in excinfo.value.reason

    def test_otherDictTypeRaises(self):
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        with pytest.raises(InvalidActionParameterError):
            validateAndCoerceParameters(actionDef, {"featureInstanceId": 12345})

    def test_connectionRefAlsoCollapses(self):
        """Same logic applies to every Ref-Schema, not just FeatureInstanceRef."""
        actionDef = _makeActionDef(
            actionId="msft.readEmails",
            connection={"type": "ConnectionRef", "required": True},
        )
        ref = {"id": "conn-uuid-123", "authority": "msft", "label": "Outlook"}
        result = validateAndCoerceParameters(actionDef, {"connection": ref})
        assert result["connection"] == "conn-uuid-123"
|
||||
|
||||
|
||||
class TestPrimitiveCoercion:
    """String-shaped primitives from JSON payloads coerce to real types."""

    def test_boolFromTrueString(self):
        actionDef = _makeActionDef(forceRefresh={"type": "bool", "required": False})
        result = validateAndCoerceParameters(actionDef, {"forceRefresh": "true"})
        assert result["forceRefresh"] is True

    def test_boolFromFalseString(self):
        actionDef = _makeActionDef(forceRefresh={"type": "bool", "required": False})
        result = validateAndCoerceParameters(actionDef, {"forceRefresh": "false"})
        assert result["forceRefresh"] is False

    def test_boolPassthrough(self):
        actionDef = _makeActionDef(forceRefresh={"type": "bool", "required": False})
        result = validateAndCoerceParameters(actionDef, {"forceRefresh": True})
        assert result["forceRefresh"] is True

    def test_boolBadValueRaises(self):
        actionDef = _makeActionDef(forceRefresh={"type": "bool", "required": False})
        with pytest.raises(InvalidActionParameterError):
            validateAndCoerceParameters(actionDef, {"forceRefresh": "maybe"})

    def test_intFromString(self):
        actionDef = _makeActionDef(periodMonth={"type": "int", "required": False})
        result = validateAndCoerceParameters(actionDef, {"periodMonth": "12"})
        assert result["periodMonth"] == 12

    def test_intBadValueRaises(self):
        actionDef = _makeActionDef(periodMonth={"type": "int", "required": False})
        with pytest.raises(InvalidActionParameterError):
            validateAndCoerceParameters(actionDef, {"periodMonth": "twelve"})

    def test_floatFromString(self):
        actionDef = _makeActionDef(threshold={"type": "float", "required": False})
        result = validateAndCoerceParameters(actionDef, {"threshold": "0.75"})
        assert result["threshold"] == 0.75
|
||||
|
||||
|
||||
class TestUnknownAndOtherTypes:
    """Undeclared keys and non-primitive declared types pass through as-is."""

    def test_unknownKeysPassThrough(self):
        """The executor injects parentOperationId, expectedDocumentFormats, etc.
        Validation must not strip them."""
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        result = validateAndCoerceParameters(actionDef, {
            "featureInstanceId": "uuid-123",
            "parentOperationId": "action_xyz",
            "expectedDocumentFormats": ["pdf", "txt"],
        })
        assert result["parentOperationId"] == "action_xyz"
        assert result["expectedDocumentFormats"] == ["pdf", "txt"]

    def test_strParamsAreUntouched(self):
        actionDef = _makeActionDef(dateFrom={"type": "str", "required": False})
        result = validateAndCoerceParameters(actionDef, {"dateFrom": "2025-01-01"})
        assert result["dateFrom"] == "2025-01-01"

    def test_listParamsAreUntouched(self):
        actionDef = _makeActionDef(documentList={"type": "List[ActionDocument]", "required": False})
        docs = [{"name": "a"}, {"name": "b"}]
        result = validateAndCoerceParameters(actionDef, {"documentList": docs})
        assert result["documentList"] is docs

    def test_doesNotMutateInput(self):
        """validateAndCoerceParameters must return a new dict."""
        actionDef = _makeActionDef(
            featureInstanceId={"type": "FeatureInstanceRef", "required": True},
        )
        source = {"featureInstanceId": {"id": "uuid", "featureCode": "trustee"}}
        result = validateAndCoerceParameters(actionDef, source)
        assert isinstance(source["featureInstanceId"], dict)
        assert result["featureInstanceId"] == "uuid"
|
||||
Loading…
Reference in a new issue