From 9bd7821cf53722966f35ffcfefa4dd5150cf2366 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 17 Nov 2025 23:12:18 +0100
Subject: [PATCH] feat: refactored ai calls and pydantic models
---
modules/datamodels/__init__.py | 2 +-
modules/datamodels/datamodelAi.py | 131 ++-
modules/datamodels/datamodelChat.py | 279 ++----
modules/datamodels/datamodelDocref.py | 118 +++
modules/datamodels/datamodelExtraction.py | 11 +-
modules/datamodels/datamodelFiles.py | 14 +-
modules/datamodels/datamodelNeutralizer.py | 26 +-
modules/datamodels/datamodelPagination.py | 5 +-
modules/datamodels/datamodelSecurity.py | 67 +-
modules/datamodels/datamodelUam.py | 92 +-
modules/datamodels/datamodelUtils.py | 8 +-
modules/datamodels/datamodelVoice.py | 20 +-
modules/datamodels/datamodelWorkflow.py | 374 ++++++++
.../chatPlayground/mainChatPlayground.py | 2 +-
modules/routes/routeChatPlayground.py | 4 +-
modules/services/serviceAi/mainServiceAi.py | 345 ++++----
.../services/serviceChat/mainServiceChat.py | 24 +-
.../mainServiceExtraction.py | 8 +-
.../renderers/rendererImage.py | 44 +-
modules/services/serviceWeb/mainServiceWeb.py | 14 +-
modules/shared/jsonUtils.py | 81 +-
modules/workflows/methods/methodAi.py | 203 ++++-
modules/workflows/methods/methodOutlook.py | 27 +-
modules/workflows/methods/methodSharepoint.py | 33 +-
.../processing/core/actionExecutor.py | 23 +-
.../processing/core/messageCreator.py | 29 +-
.../processing/modes/modeActionplan.py | 811 ------------------
.../processing/modes/modeAutomation.py | 4 +-
.../workflows/processing/modes/modeBase.py | 3 +-
.../workflows/processing/modes/modeDynamic.py | 217 ++---
.../processing/shared/placeholderFactory.py | 16 +-
.../promptGenerationActionsActionplan.py | 234 -----
.../shared/promptGenerationActionsDynamic.py | 7 +-
.../workflows/processing/workflowProcessor.py | 13 +-
pytest.ini | 2 +-
tests/README.md | 228 +++++
tests/__init__.py | 4 +
tests/conftest.py | 14 +
tests/functional/__init__.py | 10 +
.../functional/test01_ai_model_selection.py | 14 +-
.../functional/test02_ai_models.py | 493 ++++++-----
.../functional/test03_ai_operations.py | 164 +++-
.../functional/test04_ai_behavior.py | 86 +-
tests/integration/__init__.py | 4 +
.../workflows/test_workflow_execution.py | 155 ++++
tests/testdata/Foto20250906_125903.jpg | Bin 0 -> 2074421 bytes
tests/unit/__init__.py | 4 +
tests/unit/datamodels/test_docref.py | 139 +++
tests/unit/datamodels/test_workflow_models.py | 230 +++++
tests/unit/services/test_ai_service.py | 146 ++++
tests/unit/utils/test_json_utils.py | 131 +++
tests/unit/workflows/test_state_management.py | 170 ++++
.../test_architecture_validation.py | 139 +++
53 files changed, 3365 insertions(+), 2057 deletions(-)
create mode 100644 modules/datamodels/datamodelDocref.py
create mode 100644 modules/datamodels/datamodelWorkflow.py
delete mode 100644 modules/workflows/processing/modes/modeActionplan.py
delete mode 100644 modules/workflows/processing/shared/promptGenerationActionsActionplan.py
create mode 100644 tests/README.md
create mode 100644 tests/__init__.py
create mode 100644 tests/conftest.py
create mode 100644 tests/functional/__init__.py
rename test2_ai_model_selection.py => tests/functional/test01_ai_model_selection.py (98%)
rename test1_ai_models.py => tests/functional/test02_ai_models.py (62%)
rename test4_method_ai_operations.py => tests/functional/test03_ai_operations.py (66%)
rename test3_ai_behavior.py => tests/functional/test04_ai_behavior.py (82%)
create mode 100644 tests/integration/__init__.py
create mode 100644 tests/integration/workflows/test_workflow_execution.py
create mode 100644 tests/testdata/Foto20250906_125903.jpg
create mode 100644 tests/unit/__init__.py
create mode 100644 tests/unit/datamodels/test_docref.py
create mode 100644 tests/unit/datamodels/test_workflow_models.py
create mode 100644 tests/unit/services/test_ai_service.py
create mode 100644 tests/unit/utils/test_json_utils.py
create mode 100644 tests/unit/workflows/test_state_management.py
create mode 100644 tests/validation/test_architecture_validation.py
diff --git a/modules/datamodels/__init__.py b/modules/datamodels/__init__.py
index e1adfd1d..7d73660e 100644
--- a/modules/datamodels/__init__.py
+++ b/modules/datamodels/__init__.py
@@ -12,4 +12,4 @@ from . import datamodelNeutralizer as neutralizer
from . import datamodelChat as chat
from . import datamodelFiles as files
from . import datamodelVoice as voice
-from . import datamodelUtils as utils
+from . import datamodelUtils as utils
\ No newline at end of file
diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py
index 1da6c65f..48e4ff82 100644
--- a/modules/datamodels/datamodelAi.py
+++ b/modules/datamodels/datamodelAi.py
@@ -1,9 +1,11 @@
from typing import Optional, List, Dict, Any, Callable, TYPE_CHECKING, Tuple
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ConfigDict
from enum import Enum
# Import ContentPart for runtime use (needed for Pydantic model rebuilding)
from modules.datamodels.datamodelExtraction import ContentPart
+# Import JSON utilities for safe conversion
+from modules.shared.jsonUtils import extractJsonString, tryParseJson, repairBrokenJson
# Operation Types
class OperationTypeEnum(str, Enum):
@@ -109,8 +111,7 @@ class AiModel(BaseModel):
version: Optional[str] = Field(default=None, description="Model version")
lastUpdated: Optional[str] = Field(default=None, description="Last update timestamp")
- class Config:
- arbitraryTypesAllowed = True # Allow Callable type
+ model_config = ConfigDict(arbitrary_types_allowed=True) # Allow Callable type
class SelectionRule(BaseModel):
@@ -172,8 +173,7 @@ class AiModelCall(BaseModel):
model: Optional[AiModel] = Field(default=None, description="The AI model being called")
options: AiCallOptions = Field(default_factory=AiCallOptions, description="Additional model-specific options")
- class Config:
- arbitraryTypesAllowed = True
+ model_config = ConfigDict(arbitrary_types_allowed=True)
class AiModelResponse(BaseModel):
@@ -189,8 +189,7 @@ class AiModelResponse(BaseModel):
tokensUsed: Optional[Dict[str, int]] = Field(default=None, description="Token usage (input, output, total)")
metadata: Optional[Dict[str, Any]] = Field(default=None, description="Additional model-specific metadata")
- class Config:
- arbitraryTypesAllowed = True
+ model_config = ConfigDict(arbitrary_types_allowed=True)
# Structured prompt models for specialized operations
@@ -203,9 +202,6 @@ class AiCallPromptWebSearch(BaseModel):
maxNumberPages: Optional[int] = Field(default=10, description="Maximum number of pages to search (default: 10)")
language: Optional[str] = Field(default=None, description="Language code (lowercase, e.g., de, en, fr)")
researchDepth: Optional[str] = Field(default="general", description="Research depth: fast (maxDepth=1), general (maxDepth=2), deep (maxDepth=3)")
-
- class Config:
- pass
class AiCallPromptWebCrawl(BaseModel):
@@ -215,9 +211,6 @@ class AiCallPromptWebCrawl(BaseModel):
url: str = Field(description="Single URL to crawl")
maxDepth: Optional[int] = Field(default=2, description="Maximum number of hops from starting page (default: 2)")
maxWidth: Optional[int] = Field(default=10, description="Maximum pages to crawl per level (default: 10)")
-
- class Config:
- pass
class AiCallPromptImage(BaseModel):
@@ -227,7 +220,113 @@ class AiCallPromptImage(BaseModel):
size: Optional[str] = Field(default="1024x1024", description="Image size (1024x1024, 1792x1024, 1024x1792)")
quality: Optional[str] = Field(default="standard", description="Image quality (standard, hd)")
style: Optional[str] = Field(default="vivid", description="Image style (vivid, natural)")
-
- class Config:
- pass
+
+
+class DocumentData(BaseModel):
+ """Single document in response."""
+ documentName: str = Field(description="Document name")
+ documentData: Any = Field(description="Document data (can be str, bytes, dict, etc.)")
+ mimeType: str = Field(description="MIME type of the document")
+
+
+class AiProcessParameters(BaseModel):
+ """Parameters for AI processing action."""
+ aiPrompt: str = Field(description="AI instruction prompt")
+ contentParts: Optional[List[ContentPart]] = Field(
+ None,
+ description="Already-extracted content parts (required if documents need to be processed)"
+ )
+ resultType: str = Field(
+ default="txt",
+ description="Output file extension (txt, json, pdf, docx, xlsx, etc.)"
+ )
+
+
+class AiResponseMetadata(BaseModel):
+ """Metadata for AI response (varies by operation type)."""
+ # Document Generation Metadata
+ title: Optional[str] = Field(None, description="Document title")
+ filename: Optional[str] = Field(None, description="Document filename")
+
+ # Operation-Specific Metadata
+ operationType: Optional[str] = Field(None, description="Type of operation performed")
+ schemaVersion: Optional[str] = Field(None, description="Schema version (e.g., 'parameters_v1')", alias="schema")
+ extractionMethod: Optional[str] = Field(None, description="Method used for extraction")
+ sourceDocuments: Optional[List[str]] = Field(None, description="Source document references")
+
+ # Additional metadata (for extensibility)
+ additionalData: Optional[Dict[str, Any]] = Field(None, description="Additional operation-specific metadata")
+
+ @classmethod
+ def fromDict(cls, data: Optional[Dict[str, Any]]) -> Optional["AiResponseMetadata"]:
+ """Create AiResponseMetadata from dict with camelCase field names."""
+ if not data or not isinstance(data, dict):
+ return None
+
+ knownFields = {"title", "filename", "operationType", "schema", "extractionMethod", "sourceDocuments", "additionalData"}
+ mappedData = {k: v for k, v in data.items() if k in knownFields}
+
+ additionalFields = {k: v for k, v in data.items() if k not in knownFields}
+ if additionalFields:
+ mappedData["additionalData"] = additionalFields
+
+ try:
+ return cls(**mappedData)
+ except Exception:
+ return None
+
+
+class AiResponse(BaseModel):
+ """Unified response from all AI calls (planning, text, documents)."""
+ content: str = Field(description="Response content (JSON string for planning, text for analysis, unified JSON for documents)")
+ metadata: Optional[AiResponseMetadata] = Field(
+ None,
+ description="Response metadata (varies by operation type)"
+ )
+ documents: Optional[List[DocumentData]] = Field(
+ None,
+ description="Generated documents (only for document generation operations)"
+ )
+
+ def toJson(self) -> Dict[str, Any]:
+ """
+ Convert AI response content to JSON using enhanced stabilizing failsafe conversion methods.
+ Centralizes AI result to JSON conversion in one place.
+
+ Returns:
+ Dict containing the parsed JSON content, or a safe fallback structure if parsing fails.
+ """
+ if not self.content:
+ return {}
+
+ # Use enhanced stabilizing failsafe JSON conversion methods from jsonUtils
+ # First, try to extract and parse JSON using the safe methods
+ obj, err, cleaned = tryParseJson(self.content)
+
+ if err is None and isinstance(obj, dict):
+ # Successfully parsed as dict
+ return obj
+ elif err is None and isinstance(obj, list):
+ # Successfully parsed as list - wrap in dict for consistency
+ return {"data": obj}
+
+ # If parsing failed, try to repair broken JSON
+ repaired = repairBrokenJson(self.content)
+ if repaired is not None:
+ return repaired
+
+ # If all else fails, return a safe structure with the cleaned content
+ # Extract JSON string even if it's not fully parseable
+ extracted = extractJsonString(self.content)
+ if extracted and extracted != self.content:
+ # Try one more time with extracted string
+ obj, err, _ = tryParseJson(extracted)
+ if err is None and isinstance(obj, (dict, list)):
+ return obj if isinstance(obj, dict) else {"data": obj}
+
+ # Final fallback: return safe structure with raw content
+ return {
+ "content": self.content,
+ "parseError": True
+ }
diff --git a/modules/datamodels/datamodelChat.py b/modules/datamodels/datamodelChat.py
index c748c44a..4931d463 100644
--- a/modules/datamodels/datamodelChat.py
+++ b/modules/datamodels/datamodelChat.py
@@ -264,7 +264,6 @@ registerModelLabels(
class WorkflowModeEnum(str, Enum):
- WORKFLOW_ACTIONPLAN = "Actionplan"
WORKFLOW_DYNAMIC = "Dynamic"
WORKFLOW_AUTOMATION = "Automation"
@@ -273,7 +272,6 @@ registerModelLabels(
"WorkflowModeEnum",
{"en": "Workflow Mode", "fr": "Mode de workflow"},
{
- "WORKFLOW_ACTIONPLAN": {"en": "Actionplan", "fr": "Actionplan"},
"WORKFLOW_DYNAMIC": {"en": "Dynamic", "fr": "Dynamique"},
"WORKFLOW_AUTOMATION": {"en": "Automation", "fr": "Automatisation"},
},
@@ -281,125 +279,27 @@ registerModelLabels(
class ChatWorkflow(BaseModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- mandateId: str = Field(
- description="ID of the mandate this workflow belongs to",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- status: str = Field(
- description="Current status of the workflow",
- frontend_type="select",
- frontend_readonly=False,
- frontend_required=False,
- frontend_options=[
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="ID of the mandate this workflow belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ status: str = Field(default="running", description="Current status of the workflow", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
{"value": "running", "label": {"en": "Running", "fr": "En cours"}},
{"value": "completed", "label": {"en": "Completed", "fr": "Terminé"}},
{"value": "stopped", "label": {"en": "Stopped", "fr": "Arrêté"}},
{"value": "error", "label": {"en": "Error", "fr": "Erreur"}},
- ],
- )
- name: Optional[str] = Field(
- None,
- description="Name of the workflow",
- frontend_type="text",
- frontend_readonly=False,
- frontend_required=True,
- )
- currentRound: int = Field(
- description="Current round number",
- frontend_type="integer",
- frontend_readonly=True,
- frontend_required=False,
- )
- currentTask: int = Field(
- default=0,
- description="Current task number",
- frontend_type="integer",
- frontend_readonly=True,
- frontend_required=False,
- )
- currentAction: int = Field(
- default=0,
- description="Current action number",
- frontend_type="integer",
- frontend_readonly=True,
- frontend_required=False,
- )
- totalTasks: int = Field(
- default=0,
- description="Total number of tasks in the workflow",
- frontend_type="integer",
- frontend_readonly=True,
- frontend_required=False,
- )
- totalActions: int = Field(
- default=0,
- description="Total number of actions in the workflow",
- frontend_type="integer",
- frontend_readonly=True,
- frontend_required=False,
- )
- lastActivity: float = Field(
- default_factory=getUtcTimestamp,
- description="Timestamp of last activity (UTC timestamp in seconds)",
- frontend_type="timestamp",
- frontend_readonly=True,
- frontend_required=False,
- )
- startedAt: float = Field(
- default_factory=getUtcTimestamp,
- description="When the workflow started (UTC timestamp in seconds)",
- frontend_type="timestamp",
- frontend_readonly=True,
- frontend_required=False,
- )
- logs: List[ChatLog] = Field(
- default_factory=list,
- description="Workflow logs",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- messages: List[ChatMessage] = Field(
- default_factory=list,
- description="Messages in the workflow",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- stats: List[ChatStat] = Field(
- default_factory=list,
- description="Workflow statistics list",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- tasks: list = Field(
- default_factory=list,
- description="List of tasks in the workflow",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- workflowMode: WorkflowModeEnum = Field(
- default=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- description="Workflow mode selector",
- frontend_type="select",
- frontend_readonly=False,
- frontend_required=False,
- frontend_options=[
- {
- "value": WorkflowModeEnum.WORKFLOW_ACTIONPLAN.value,
- "label": {"en": "Actionplan", "fr": "Actionplan"},
- },
+ ]})
+ name: Optional[str] = Field(None, description="Name of the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
+ currentRound: int = Field(default=0, description="Current round number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ currentTask: int = Field(default=0, description="Current task number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ currentAction: int = Field(default=0, description="Current action number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ totalTasks: int = Field(default=0, description="Total number of tasks in the workflow", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ totalActions: int = Field(default=0, description="Total number of actions in the workflow", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ lastActivity: float = Field(default_factory=getUtcTimestamp, description="Timestamp of last activity (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ startedAt: float = Field(default_factory=getUtcTimestamp, description="When the workflow started (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ logs: List[ChatLog] = Field(default_factory=list, description="Workflow logs", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ messages: List[ChatMessage] = Field(default_factory=list, description="Messages in the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ stats: List[ChatStat] = Field(default_factory=list, description="Workflow statistics list", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ tasks: list = Field(default_factory=list, description="List of tasks in the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ workflowMode: WorkflowModeEnum = Field(default=WorkflowModeEnum.WORKFLOW_DYNAMIC, description="Workflow mode selector", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
{
"value": WorkflowModeEnum.WORKFLOW_DYNAMIC.value,
"label": {"en": "Dynamic", "fr": "Dynamique"},
@@ -408,22 +308,37 @@ class ChatWorkflow(BaseModel):
"value": WorkflowModeEnum.WORKFLOW_AUTOMATION.value,
"label": {"en": "Automation", "fr": "Automatisation"},
},
- ],
- )
- maxSteps: int = Field(
- default=5,
- description="Maximum number of iterations in react mode",
- frontend_type="integer",
- frontend_readonly=False,
- frontend_required=False,
- )
- expectedFormats: Optional[List[str]] = Field(
- None,
- description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
+ ]})
+ maxSteps: int = Field(default=10, description="Maximum number of iterations in dynamic mode", json_schema_extra={"frontend_type": "integer", "frontend_readonly": False, "frontend_required": False})
+ expectedFormats: Optional[List[str]] = Field(None, description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+
+ # Helper methods for execution state management
+ def getRoundIndex(self) -> int:
+ """Get current round index"""
+ return self.currentRound
+
+ def getTaskIndex(self) -> int:
+ """Get current task index"""
+ return self.currentTask
+
+ def getActionIndex(self) -> int:
+ """Get current action index"""
+ return self.currentAction
+
+ def incrementRound(self):
+ """Increment round when new user input received"""
+ self.currentRound += 1
+ self.currentTask = 0
+ self.currentAction = 0
+
+ def incrementTask(self):
+ """Increment task when starting new task in current round"""
+ self.currentTask += 1
+ self.currentAction = 0
+
+ def incrementAction(self):
+ """Increment action when executing new action in current task"""
+ self.currentAction += 1
registerModelLabels(
@@ -885,7 +800,7 @@ registerModelLabels(
class TaskContext(BaseModel):
taskStep: TaskStep
- workflow: Optional["ChatWorkflow"] = None
+ workflow: Optional[ChatWorkflow] = None
workflowId: Optional[str] = None
availableDocuments: Optional[str] = "No documents available"
availableConnections: Optional[list[str]] = Field(default_factory=list)
@@ -900,6 +815,26 @@ class TaskContext(BaseModel):
failedActions: Optional[list] = Field(default_factory=list)
successfulActions: Optional[list] = Field(default_factory=list)
criteriaProgress: Optional[dict] = None
+
+ # Stage 2 context fields (NEW)
+ actionObjective: Optional[str] = Field(None, description="Objective for current action")
+ parametersContext: Optional[str] = Field(None, description="Context for parameter generation")
+ learnings: Optional[list[str]] = Field(default_factory=list, description="Learnings from previous actions")
+ stage1Selection: Optional[dict] = Field(None, description="Stage 1 selection data")
+
+ def updateFromSelection(self, selection: Any):
+ """Update context from Stage 1 selection
+
+ Args:
+ selection: ActionDefinition instance from Stage 1
+ """
+ from modules.datamodels.datamodelWorkflow import ActionDefinition
+
+ if isinstance(selection, ActionDefinition):
+ self.actionObjective = selection.actionObjective
+ self.parametersContext = selection.parametersContext
+ self.learnings = selection.learnings if selection.learnings else []
+ self.stage1Selection = selection.model_dump()
def getDocumentReferences(self) -> List[str]:
docs = []
@@ -973,8 +908,7 @@ registerModelLabels(
},
)
-# Resolve forward references
-TaskContext.update_forward_refs()
+# Forward references resolved automatically since ChatWorkflow is defined above
class PromptPlaceholder(BaseModel):
@@ -1013,71 +947,20 @@ registerModelLabels(
class AutomationDefinition(BaseModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
- mandateId: str = Field(
- description="Mandate ID",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
- label: str = Field(
- description="User-friendly name",
- frontend_type="text",
- frontend_required=True
- )
- schedule: str = Field(
- description="Cron schedule pattern",
- frontend_type="select",
- frontend_options=[
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="Mandate ID", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ label: str = Field(description="User-friendly name", json_schema_extra={"frontend_type": "text", "frontend_required": True})
+ schedule: str = Field(description="Cron schedule pattern", json_schema_extra={"frontend_type": "select", "frontend_required": True, "frontend_options": [
{"value": "0 */4 * * *", "label": {"en": "Every 4 hours", "fr": "Toutes les 4 heures"}},
{"value": "0 22 * * *", "label": {"en": "Daily at 22:00", "fr": "Quotidien à 22:00"}},
{"value": "0 10 * * 1", "label": {"en": "Weekly Monday 10:00", "fr": "Hebdomadaire lundi 10:00"}}
- ],
- frontend_required=True
- )
- template: str = Field(
- description="JSON template with placeholders (format: {{KEY:PLACEHOLDER_NAME}})",
- frontend_type="textarea",
- frontend_required=True
- )
- placeholders: Dict[str, str] = Field(
- default_factory=dict,
- description="Dictionary of placeholder key/value pairs (e.g., {'connectionName': 'MyConnection', 'sharepointFolderNameSource': '/folder/path', 'webResearchUrl': 'https://...', 'webResearchPrompt': '...', 'documentPrompt': '...'})",
- frontend_type="text"
- )
- active: bool = Field(
- default=False,
- description="Whether automation should be launched in event handler",
- frontend_type="checkbox",
- frontend_required=False
- )
- eventId: Optional[str] = Field(
- None,
- description="Event ID from event management (None if not registered)",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
- status: Optional[str] = Field(
- None,
- description="Status: 'active' if event is registered, 'inactive' if not (computed, readonly)",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
- executionLogs: List[Dict[str, Any]] = Field(
- default_factory=list,
- description="List of execution logs, each containing timestamp, workflowId, status, and messages",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
+ ]})
+ template: str = Field(description="JSON template with placeholders (format: {{KEY:PLACEHOLDER_NAME}})", json_schema_extra={"frontend_type": "textarea", "frontend_required": True})
+ placeholders: Dict[str, str] = Field(default_factory=dict, description="Dictionary of placeholder key/value pairs (e.g., {'connectionName': 'MyConnection', 'sharepointFolderNameSource': '/folder/path', 'webResearchUrl': 'https://...', 'webResearchPrompt': '...', 'documentPrompt': '...'})", json_schema_extra={"frontend_type": "text"})
+ active: bool = Field(default=False, description="Whether automation should be launched in event handler", json_schema_extra={"frontend_type": "checkbox", "frontend_required": False})
+ eventId: Optional[str] = Field(None, description="Event ID from event management (None if not registered)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ status: Optional[str] = Field(None, description="Status: 'active' if event is registered, 'inactive' if not (computed, readonly)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ executionLogs: List[Dict[str, Any]] = Field(default_factory=list, description="List of execution logs, each containing timestamp, workflowId, status, and messages", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
diff --git a/modules/datamodels/datamodelDocref.py b/modules/datamodels/datamodelDocref.py
new file mode 100644
index 00000000..0ad3d2bb
--- /dev/null
+++ b/modules/datamodels/datamodelDocref.py
@@ -0,0 +1,118 @@
+"""
+Document reference models for typed document references in workflows.
+"""
+
+from typing import List, Optional
+from pydantic import BaseModel, Field
+from modules.shared.attributeUtils import registerModelLabels
+
+
+class DocumentReference(BaseModel):
+ """Base class for document references"""
+ pass
+
+
+class DocumentListReference(DocumentReference):
+ """Reference to a document list via message label"""
+ messageId: Optional[str] = Field(None, description="Optional message ID for cross-round references")
+ label: str = Field(description="Document list label")
+
+ def to_string(self) -> str:
+ """Convert to string format: docList:messageId:label or docList:label"""
+ if self.messageId:
+ return f"docList:{self.messageId}:{self.label}"
+ return f"docList:{self.label}"
+
+
+class DocumentItemReference(DocumentReference):
+ """Reference to a specific document item"""
+ documentId: str = Field(description="Document ID")
+ fileName: Optional[str] = Field(None, description="Optional file name")
+
+ def to_string(self) -> str:
+ """Convert to string format: docItem:documentId:fileName or docItem:documentId"""
+ if self.fileName:
+ return f"docItem:{self.documentId}:{self.fileName}"
+ return f"docItem:{self.documentId}"
+
+
+class DocumentReferenceList(BaseModel):
+ """List of document references with conversion methods"""
+ references: List[DocumentReference] = Field(
+ default_factory=list,
+ description="List of document references"
+ )
+
+ def to_string_list(self) -> List[str]:
+ """Convert all references to string list"""
+ return [ref.to_string() for ref in self.references]
+
+ @classmethod
+ def from_string_list(cls, stringList: List[str]) -> "DocumentReferenceList":
+ """Parse string list to typed references
+
+ Supports formats:
+ - docList:label
+ - docList:messageId:label
+ - docItem:documentId
+ - docItem:documentId:fileName
+ """
+ references = []
+
+ for refStr in stringList:
+ if not refStr or not isinstance(refStr, str):
+ continue
+
+ refStr = refStr.strip()
+
+ # Parse docList: references
+ if refStr.startswith("docList:"):
+ parts = refStr[8:].split(":", 1) # Remove "docList:" prefix
+ if len(parts) == 2:
+ # docList:messageId:label
+ messageId, label = parts
+ references.append(DocumentListReference(messageId=messageId, label=label))
+ elif len(parts) == 1 and parts[0]:
+ # docList:label
+ references.append(DocumentListReference(label=parts[0]))
+
+ # Parse docItem: references
+ elif refStr.startswith("docItem:"):
+ parts = refStr[8:].split(":", 1) # Remove "docItem:" prefix
+ if len(parts) == 2:
+ # docItem:documentId:fileName
+ documentId, fileName = parts
+ references.append(DocumentItemReference(documentId=documentId, fileName=fileName))
+ elif len(parts) == 1 and parts[0]:
+ # docItem:documentId
+ references.append(DocumentItemReference(documentId=parts[0]))
+
+ # Unknown format - skip or log warning
+ else:
+ # Try to parse as simple string (backward compatibility)
+ # Assume it's a label if it doesn't match known patterns
+ if refStr:
+ references.append(DocumentListReference(label=refStr))
+
+ return cls(references=references)
+
+
+registerModelLabels(
+ "DocumentReference",
+ {"en": "Document Reference", "fr": "Référence de document"},
+ {
+ "messageId": {"en": "Message ID", "fr": "ID du message"},
+ "label": {"en": "Label", "fr": "Étiquette"},
+ "documentId": {"en": "Document ID", "fr": "ID du document"},
+ "fileName": {"en": "File Name", "fr": "Nom du fichier"},
+ },
+)
+
+registerModelLabels(
+ "DocumentReferenceList",
+ {"en": "Document Reference List", "fr": "Liste de références de documents"},
+ {
+ "references": {"en": "References", "fr": "Références"},
+ },
+)
+
diff --git a/modules/datamodels/datamodelExtraction.py b/modules/datamodels/datamodelExtraction.py
index 5a530cab..ebfe2944 100644
--- a/modules/datamodels/datamodelExtraction.py
+++ b/modules/datamodels/datamodelExtraction.py
@@ -1,9 +1,6 @@
-from typing import Any, Dict, List, Optional, Literal, TYPE_CHECKING
+from typing import Any, Dict, List, Optional, Literal
from pydantic import BaseModel, Field
-if TYPE_CHECKING:
- from modules.datamodels.datamodelAi import OperationTypeEnum
-
class ContentPart(BaseModel):
id: str = Field(description="Unique content part identifier")
@@ -67,7 +64,6 @@ class ExtractionOptions(BaseModel):
# Core extraction parameters
prompt: str = Field(description="Extraction prompt for AI processing")
- operationType: 'OperationTypeEnum' = Field(description="Type of operation for AI processing")
processDocumentsIndividually: bool = Field(default=True, description="Process each document separately")
# Image processing parameters
@@ -85,7 +81,4 @@ class ExtractionOptions(BaseModel):
# Additional processing options
enableParallelProcessing: bool = Field(default=True, description="Enable parallel processing of chunks")
- maxConcurrentChunks: int = Field(default=5, ge=1, le=20, description="Maximum number of chunks to process concurrently")
-
- class Config:
- arbitraryTypesAllowed = True # Allow OperationTypeEnum import
\ No newline at end of file
+ maxConcurrentChunks: int = Field(default=5, ge=1, le=20, description="Maximum number of chunks to process concurrently")
\ No newline at end of file
diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py
index 32e8d445..106bac96 100644
--- a/modules/datamodels/datamodelFiles.py
+++ b/modules/datamodels/datamodelFiles.py
@@ -9,13 +9,13 @@ import base64
class FileItem(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", frontend_type="text", frontend_readonly=True, frontend_required=False)
- mandateId: str = Field(description="ID of the mandate this file belongs to", frontend_type="text", frontend_readonly=True, frontend_required=False)
- fileName: str = Field(description="Name of the file", frontend_type="text", frontend_readonly=False, frontend_required=True)
- mimeType: str = Field(description="MIME type of the file", frontend_type="text", frontend_readonly=True, frontend_required=False)
- fileHash: str = Field(description="Hash of the file", frontend_type="text", frontend_readonly=True, frontend_required=False)
- fileSize: int = Field(description="Size of the file in bytes", frontend_type="integer", frontend_readonly=True, frontend_required=False)
- creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the file was created (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="ID of the mandate this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ fileName: str = Field(description="Name of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
+ mimeType: str = Field(description="MIME type of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ fileHash: str = Field(description="Hash of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ fileSize: int = Field(description="Size of the file in bytes", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the file was created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
"FileItem",
diff --git a/modules/datamodels/datamodelNeutralizer.py b/modules/datamodels/datamodelNeutralizer.py
index 60894dff..b1f2b411 100644
--- a/modules/datamodels/datamodelNeutralizer.py
+++ b/modules/datamodels/datamodelNeutralizer.py
@@ -7,13 +7,13 @@ from modules.shared.attributeUtils import registerModelLabels
class DataNeutraliserConfig(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", frontend_type="text", frontend_readonly=True, frontend_required=False)
- mandateId: str = Field(description="ID of the mandate this configuration belongs to", frontend_type="text", frontend_readonly=True, frontend_required=True)
- userId: str = Field(description="ID of the user who created this configuration", frontend_type="text", frontend_readonly=True, frontend_required=True)
- enabled: bool = Field(default=True, description="Whether data neutralization is enabled", frontend_type="checkbox", frontend_readonly=False, frontend_required=False)
- namesToParse: str = Field(default="", description="Multiline list of names to parse for neutralization", frontend_type="textarea", frontend_readonly=False, frontend_required=False)
- sharepointSourcePath: str = Field(default="", description="SharePoint path to read files for neutralization", frontend_type="text", frontend_readonly=False, frontend_required=False)
- sharepointTargetPath: str = Field(default="", description="SharePoint path to store neutralized files", frontend_type="text", frontend_readonly=False, frontend_required=False)
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="ID of the mandate this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ userId: str = Field(description="ID of the user who created this configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ enabled: bool = Field(default=True, description="Whether data neutralization is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False})
+ namesToParse: str = Field(default="", description="Multiline list of names to parse for neutralization", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False})
+ sharepointSourcePath: str = Field(default="", description="SharePoint path to read files for neutralization", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
+ sharepointTargetPath: str = Field(default="", description="SharePoint path to store neutralized files", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
registerModelLabels(
"DataNeutraliserConfig",
{"en": "Data Neutralization Config", "fr": "Configuration de neutralisation des données"},
@@ -29,12 +29,12 @@ registerModelLabels(
)
class DataNeutralizerAttributes(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the attribute mapping (used as UID in neutralized files)", frontend_type="text", frontend_readonly=True, frontend_required=False)
- mandateId: str = Field(description="ID of the mandate this attribute belongs to", frontend_type="text", frontend_readonly=True, frontend_required=True)
- userId: str = Field(description="ID of the user who created this attribute", frontend_type="text", frontend_readonly=True, frontend_required=True)
- originalText: str = Field(description="Original text that was neutralized", frontend_type="text", frontend_readonly=True, frontend_required=True)
- fileId: Optional[str] = Field(default=None, description="ID of the file this attribute belongs to", frontend_type="text", frontend_readonly=True, frontend_required=False)
- patternType: str = Field(description="Type of pattern that matched (email, phone, name, etc.)", frontend_type="text", frontend_readonly=True, frontend_required=True)
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the attribute mapping (used as UID in neutralized files)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="ID of the mandate this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ userId: str = Field(description="ID of the user who created this attribute", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ originalText: str = Field(description="Original text that was neutralized", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ fileId: Optional[str] = Field(default=None, description="ID of the file this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ patternType: str = Field(description="Type of pattern that matched (email, phone, name, etc.)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
registerModelLabels(
"DataNeutralizerAttributes",
{"en": "Neutralized Data Attribute", "fr": "Attribut de données neutralisées"},
diff --git a/modules/datamodels/datamodelPagination.py b/modules/datamodels/datamodelPagination.py
index 3222e0e7..bed65332 100644
--- a/modules/datamodels/datamodelPagination.py
+++ b/modules/datamodels/datamodelPagination.py
@@ -5,7 +5,7 @@ All models use camelStyle naming convention for consistency with frontend.
"""
from typing import List, Dict, Any, Optional, Generic, TypeVar
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ConfigDict
import math
T = TypeVar('T')
@@ -67,6 +67,5 @@ class PaginatedResponse(BaseModel, Generic[T]):
items: List[T] = Field(..., description="Array of items for current page")
pagination: Optional[PaginationMetadata] = Field(..., description="Pagination metadata (None if pagination not applied)")
- class Config:
- arbitrary_types_allowed = True
+ model_config = ConfigDict(arbitrary_types_allowed=True)
diff --git a/modules/datamodels/datamodelSecurity.py b/modules/datamodels/datamodelSecurity.py
index e5a1e8a4..6803638e 100644
--- a/modules/datamodels/datamodelSecurity.py
+++ b/modules/datamodels/datamodelSecurity.py
@@ -1,7 +1,7 @@
"""Security models: Token and AuthEvent."""
from typing import Optional
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ConfigDict
from modules.shared.attributeUtils import registerModelLabels
from modules.shared.timeUtils import getUtcTimestamp
from .datamodelUam import AuthAuthority
@@ -47,8 +47,7 @@ class Token(BaseModel):
None, description="Mandate ID for tenant scoping of the token"
)
- class Config:
- use_enum_values = True
+ model_config = ConfigDict(use_enum_values=True)
registerModelLabels(
@@ -75,60 +74,14 @@ registerModelLabels(
class AuthEvent(BaseModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Unique ID of the auth event",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- userId: str = Field(
- description="ID of the user this event belongs to",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=True,
- )
- eventType: str = Field(
- description="Type of authentication event (e.g., 'login', 'logout', 'token_refresh')",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=True,
- )
- timestamp: float = Field(
- default_factory=getUtcTimestamp,
- description="Unix timestamp when the event occurred",
- frontend_type="datetime",
- frontend_readonly=True,
- frontend_required=True,
- )
- ipAddress: Optional[str] = Field(
- default=None,
- description="IP address from which the event originated",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- userAgent: Optional[str] = Field(
- default=None,
- description="User agent string from the request",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
- success: bool = Field(
- default=True,
- description="Whether the authentication event was successful",
- frontend_type="boolean",
- frontend_readonly=True,
- frontend_required=True,
- )
- details: Optional[str] = Field(
- default=None,
- description="Additional details about the event",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False,
- )
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the auth event", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ userId: str = Field(description="ID of the user this event belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ eventType: str = Field(description="Type of authentication event (e.g., 'login', 'logout', 'token_refresh')", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ timestamp: float = Field(default_factory=getUtcTimestamp, description="Unix timestamp when the event occurred", json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True})
+ ipAddress: Optional[str] = Field(default=None, description="IP address from which the event originated", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ userAgent: Optional[str] = Field(default=None, description="User agent string from the request", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ success: bool = Field(default=True, description="Whether the authentication event was successful", json_schema_extra={"frontend_type": "boolean", "frontend_readonly": True, "frontend_required": True})
+ details: Optional[str] = Field(default=None, description="Additional details about the event", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py
index a889b4ae..4a9c10aa 100644
--- a/modules/datamodels/datamodelUam.py
+++ b/modules/datamodels/datamodelUam.py
@@ -25,15 +25,35 @@ class ConnectionStatus(str, Enum):
PENDING = "pending"
class Mandate(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the mandate", frontend_type="text", frontend_readonly=True, frontend_required=False)
- name: str = Field(description="Name of the mandate", frontend_type="text", frontend_readonly=False, frontend_required=True)
- language: str = Field(default="en", description="Default language of the mandate", frontend_type="select", frontend_readonly=False, frontend_required=True, frontend_options=[
- {"value": "de", "label": {"en": "Deutsch", "fr": "Allemand"}},
- {"value": "en", "label": {"en": "English", "fr": "Anglais"}},
- {"value": "fr", "label": {"en": "Français", "fr": "Français"}},
- {"value": "it", "label": {"en": "Italiano", "fr": "Italien"}},
- ])
- enabled: bool = Field(default=True, description="Indicates whether the mandate is enabled", frontend_type="checkbox", frontend_readonly=False, frontend_required=False)
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Unique ID of the mandate",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ )
+ name: str = Field(
+ description="Name of the mandate",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ )
+ language: str = Field(
+ default="en",
+ description="Default language of the mandate",
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "frontend_options": [
+ {"value": "de", "label": {"en": "Deutsch", "fr": "Allemand"}},
+ {"value": "en", "label": {"en": "English", "fr": "Anglais"}},
+ {"value": "fr", "label": {"en": "Français", "fr": "Français"}},
+ {"value": "it", "label": {"en": "Italiano", "fr": "Italien"}},
+ ]
+ }
+ )
+ enabled: bool = Field(
+ default=True,
+ description="Indicates whether the mandate is enabled",
+ json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ )
registerModelLabels(
"Mandate",
{"en": "Mandate", "fr": "Mandat"},
@@ -46,31 +66,31 @@ registerModelLabels(
)
class UserConnection(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the connection", frontend_type="text", frontend_readonly=True, frontend_required=False)
- userId: str = Field(description="ID of the user this connection belongs to", frontend_type="text", frontend_readonly=True, frontend_required=False)
- authority: AuthAuthority = Field(description="Authentication authority", frontend_type="select", frontend_readonly=True, frontend_required=False, frontend_options=[
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the connection", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ userId: str = Field(description="ID of the user this connection belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ authority: AuthAuthority = Field(description="Authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": [
{"value": "local", "label": {"en": "Local", "fr": "Local"}},
{"value": "google", "label": {"en": "Google", "fr": "Google"}},
{"value": "msft", "label": {"en": "Microsoft", "fr": "Microsoft"}},
- ])
- externalId: str = Field(description="User ID in the external system", frontend_type="text", frontend_readonly=True, frontend_required=False)
- externalUsername: str = Field(description="Username in the external system", frontend_type="text", frontend_readonly=False, frontend_required=False)
- externalEmail: Optional[EmailStr] = Field(None, description="Email in the external system", frontend_type="email", frontend_readonly=False, frontend_required=False)
- status: ConnectionStatus = Field(default=ConnectionStatus.ACTIVE, description="Connection status", frontend_type="select", frontend_readonly=False, frontend_required=False, frontend_options=[
+ ]})
+ externalId: str = Field(description="User ID in the external system", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ externalUsername: str = Field(description="Username in the external system", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
+ externalEmail: Optional[EmailStr] = Field(None, description="Email in the external system", json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False})
+ status: ConnectionStatus = Field(default=ConnectionStatus.ACTIVE, description="Connection status", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
{"value": "active", "label": {"en": "Active", "fr": "Actif"}},
{"value": "inactive", "label": {"en": "Inactive", "fr": "Inactif"}},
{"value": "expired", "label": {"en": "Expired", "fr": "Expiré"}},
{"value": "pending", "label": {"en": "Pending", "fr": "En attente"}},
- ])
- connectedAt: float = Field(default_factory=getUtcTimestamp, description="When the connection was established (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
- lastChecked: float = Field(default_factory=getUtcTimestamp, description="When the connection was last verified (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
- expiresAt: Optional[float] = Field(None, description="When the connection expires (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
- tokenStatus: Optional[str] = Field(None, description="Current token status: active, expired, none", frontend_type="select", frontend_readonly=True, frontend_required=False, frontend_options=[
+ ]})
+ connectedAt: float = Field(default_factory=getUtcTimestamp, description="When the connection was established (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ lastChecked: float = Field(default_factory=getUtcTimestamp, description="When the connection was last verified (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ expiresAt: Optional[float] = Field(None, description="When the connection expires (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ tokenStatus: Optional[str] = Field(None, description="Current token status: active, expired, none", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": [
{"value": "active", "label": {"en": "Active", "fr": "Actif"}},
{"value": "expired", "label": {"en": "Expired", "fr": "Expiré"}},
{"value": "none", "label": {"en": "None", "fr": "Aucun"}},
- ])
- tokenExpiresAt: Optional[float] = Field(None, description="When the current token expires (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
+ ]})
+ tokenExpiresAt: Optional[float] = Field(None, description="When the current token expires (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
"UserConnection",
{"en": "User Connection", "fr": "Connexion utilisateur"},
@@ -91,28 +111,28 @@ registerModelLabels(
)
class User(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the user", frontend_type="text", frontend_readonly=True, frontend_required=False)
- username: str = Field(description="Username for login", frontend_type="text", frontend_readonly=False, frontend_required=True)
- email: Optional[EmailStr] = Field(None, description="Email address of the user", frontend_type="email", frontend_readonly=False, frontend_required=True)
- fullName: Optional[str] = Field(None, description="Full name of the user", frontend_type="text", frontend_readonly=False, frontend_required=False)
- language: str = Field(default="en", description="Preferred language of the user", frontend_type="select", frontend_readonly=False, frontend_required=True, frontend_options=[
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the user", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ username: str = Field(description="Username for login", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
+ email: Optional[EmailStr] = Field(None, description="Email address of the user", json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": True})
+ fullName: Optional[str] = Field(None, description="Full name of the user", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
+ language: str = Field(default="en", description="Preferred language of the user", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_options": [
{"value": "de", "label": {"en": "Deutsch", "fr": "Allemand"}},
{"value": "en", "label": {"en": "English", "fr": "Anglais"}},
{"value": "fr", "label": {"en": "Français", "fr": "Français"}},
{"value": "it", "label": {"en": "Italiano", "fr": "Italien"}},
- ])
- enabled: bool = Field(default=True, description="Indicates whether the user is enabled", frontend_type="checkbox", frontend_readonly=False, frontend_required=False)
- privilege: UserPrivilege = Field(default=UserPrivilege.USER, description="Permission level", frontend_type="select", frontend_readonly=False, frontend_required=True, frontend_options=[
+ ]})
+ enabled: bool = Field(default=True, description="Indicates whether the user is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False})
+ privilege: UserPrivilege = Field(default=UserPrivilege.USER, description="Permission level", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_options": [
{"value": "user", "label": {"en": "User", "fr": "Utilisateur"}},
{"value": "admin", "label": {"en": "Admin", "fr": "Administrateur"}},
{"value": "sysadmin", "label": {"en": "SysAdmin", "fr": "Administrateur système"}},
- ])
- authenticationAuthority: AuthAuthority = Field(default=AuthAuthority.LOCAL, description="Primary authentication authority", frontend_type="select", frontend_readonly=True, frontend_required=False, frontend_options=[
+ ]})
+ authenticationAuthority: AuthAuthority = Field(default=AuthAuthority.LOCAL, description="Primary authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": [
{"value": "local", "label": {"en": "Local", "fr": "Local"}},
{"value": "google", "label": {"en": "Google", "fr": "Google"}},
{"value": "msft", "label": {"en": "Microsoft", "fr": "Microsoft"}},
- ])
- mandateId: Optional[str] = Field(None, description="ID of the mandate this user belongs to", frontend_type="text", frontend_readonly=True, frontend_required=False)
+ ]})
+ mandateId: Optional[str] = Field(None, description="ID of the mandate this user belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
"User",
{"en": "User", "fr": "Utilisateur"},
diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py
index 67a42534..4f1c69c2 100644
--- a/modules/datamodels/datamodelUtils.py
+++ b/modules/datamodels/datamodelUtils.py
@@ -6,10 +6,10 @@ import uuid
class Prompt(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", frontend_type="text", frontend_readonly=True, frontend_required=False)
- mandateId: str = Field(description="ID of the mandate this prompt belongs to", frontend_type="text", frontend_readonly=True, frontend_required=False)
- content: str = Field(description="Content of the prompt", frontend_type="textarea", frontend_readonly=False, frontend_required=True)
- name: str = Field(description="Name of the prompt", frontend_type="text", frontend_readonly=False, frontend_required=True)
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ mandateId: str = Field(description="ID of the mandate this prompt belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ content: str = Field(description="Content of the prompt", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": True})
+ name: str = Field(description="Name of the prompt", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
registerModelLabels(
"Prompt",
{"en": "Prompt", "fr": "Invite"},
diff --git a/modules/datamodels/datamodelVoice.py b/modules/datamodels/datamodelVoice.py
index 1ab47f15..10e820c6 100644
--- a/modules/datamodels/datamodelVoice.py
+++ b/modules/datamodels/datamodelVoice.py
@@ -7,16 +7,16 @@ import uuid
class VoiceSettings(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", frontend_type="text", frontend_readonly=True, frontend_required=False)
- userId: str = Field(description="ID of the user these settings belong to", frontend_type="text", frontend_readonly=True, frontend_required=True)
- mandateId: str = Field(description="ID of the mandate these settings belong to", frontend_type="text", frontend_readonly=True, frontend_required=True)
- sttLanguage: str = Field(default="de-DE", description="Speech-to-Text language", frontend_type="select", frontend_readonly=False, frontend_required=True)
- ttsLanguage: str = Field(default="de-DE", description="Text-to-Speech language", frontend_type="select", frontend_readonly=False, frontend_required=True)
- ttsVoice: str = Field(default="de-DE-KatjaNeural", description="Text-to-Speech voice", frontend_type="select", frontend_readonly=False, frontend_required=True)
- translationEnabled: bool = Field(default=True, description="Whether translation is enabled", frontend_type="checkbox", frontend_readonly=False, frontend_required=False)
- targetLanguage: str = Field(default="en-US", description="Target language for translation", frontend_type="select", frontend_readonly=False, frontend_required=False)
- creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were created (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
- lastModified: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were last modified (UTC timestamp in seconds)", frontend_type="timestamp", frontend_readonly=True, frontend_required=False)
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ userId: str = Field(description="ID of the user these settings belong to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ mandateId: str = Field(description="ID of the mandate these settings belong to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+ sttLanguage: str = Field(default="de-DE", description="Speech-to-Text language", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True})
+ ttsLanguage: str = Field(default="de-DE", description="Text-to-Speech language", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True})
+ ttsVoice: str = Field(default="de-DE-KatjaNeural", description="Text-to-Speech voice", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True})
+ translationEnabled: bool = Field(default=True, description="Whether translation is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False})
+ targetLanguage: str = Field(default="en-US", description="Target language for translation", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False})
+ creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ lastModified: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were last modified (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
registerModelLabels(
diff --git a/modules/datamodels/datamodelWorkflow.py b/modules/datamodels/datamodelWorkflow.py
new file mode 100644
index 00000000..55bee215
--- /dev/null
+++ b/modules/datamodels/datamodelWorkflow.py
@@ -0,0 +1,374 @@
+"""
+Workflow execution models for action definitions, AI responses, and workflow-level structures.
+"""
+
+from typing import Dict, Any, List, Optional, TYPE_CHECKING
+from pydantic import BaseModel, Field
+from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.jsonUtils import extractJsonString, tryParseJson, repairBrokenJson
+
+# Import DocumentReferenceList at runtime (needed for ActionDefinition)
+from modules.datamodels.datamodelDocref import DocumentReferenceList
+
+# Forward references for circular imports (use string annotations)
+if TYPE_CHECKING:
+ from modules.datamodels.datamodelChat import ChatDocument, ActionResult
+ from modules.datamodels.datamodelExtraction import ExtractionOptions
+
+
class ActionDefinition(BaseModel):
    """Action definition with selection and parameters from the planning phase.

    Stage 1 (planning) selects the action, its objective, and any resources
    (``documentList``, ``connectionReference``). Stage 2 generates the
    action-specific ``parameters`` dict when it was not already produced or
    inferred in Stage 1.
    """

    # Core action selection (Stage 1)
    action: str = Field(description="Compound action name (method.action)")
    actionObjective: str = Field(description="Objective for this action")
    parametersContext: Optional[str] = Field(
        None,
        description="Context for parameter generation"
    )
    learnings: List[str] = Field(
        default_factory=list,
        description="Learnings from previous actions"
    )

    # Resources (ALWAYS defined in Stage 1 if action needs them)
    documentList: Optional[DocumentReferenceList] = Field(
        None,
        description="Document references (ALWAYS defined in Stage 1 if action needs documents)"
    )
    connectionReference: Optional[str] = Field(
        None,
        description="Connection reference (ALWAYS defined in Stage 1 if action needs connection)"
    )

    # Parameters (may be defined in Stage 1 OR Stage 2, depending on action and actionObjective)
    parameters: Optional[Dict[str, Any]] = Field(
        None,
        description="Action-specific parameters (generated in Stage 2 for complex actions, or inferred from actionObjective for simple actions)"
    )

    def hasParameters(self) -> bool:
        """Check if parameters have been generated (Stage 2 complete or inferred)."""
        return self.parameters is not None

    def needsStage2(self) -> bool:
        """Determine if Stage 2 parameter generation is needed (generic, deterministic check)

        Generic logic (works for any action, dynamically added or removed):
        - If parameters are already set → Stage 2 not needed
        - If parameters are None → Stage 2 needed (to generate parameters from actionObjective and context)

        Note: Stage 1 always defines documentList and connectionReference if the action needs them.
        Stage 2 only generates the action-specific parameters dictionary.
        """
        # Delegate to hasParameters() so the "parameters are set" check is
        # defined in exactly one place.
        return not self.hasParameters()

    def updateFromStage1StringReferences(self, stringRefs: Optional[List[str]], connectionRef: Optional[str]) -> None:
        """Update documentList and connectionReference from Stage 1 string references.

        Called when Stage 1 AI returns string references that need to be converted to typed models.
        Empty/None inputs leave the corresponding attribute untouched.
        """
        if stringRefs:
            self.documentList = DocumentReferenceList.from_string_list(stringRefs)
        if connectionRef:
            self.connectionReference = connectionRef
+
+
class AiResponseMetadata(BaseModel):
    """Metadata for AI response (varies by operation type)."""

    # Allow population by field name in addition to alias. Required because
    # fromDict() produces the key "schemaVersion" (after snake_case ->
    # camelCase conversion), while the field below declares alias="schema";
    # without this, pydantic v2 only accepts the alias and would reject the
    # field name during validation.
    model_config = {"populate_by_name": True}

    # Document Generation Metadata
    title: Optional[str] = Field(None, description="Document title")
    filename: Optional[str] = Field(None, description="Document filename")

    # Operation-Specific Metadata
    operationType: Optional[str] = Field(None, description="Type of operation performed")
    schemaVersion: Optional[str] = Field(None, description="Schema version (e.g., 'parameters_v1')", alias="schema")
    extractionMethod: Optional[str] = Field(None, description="Method used for extraction")
    sourceDocuments: Optional[List[str]] = Field(None, description="Source document references")

    # Additional metadata (for extensibility)
    additionalData: Optional[Dict[str, Any]] = Field(None, description="Additional operation-specific metadata")

    @classmethod
    def fromDict(cls, data: Optional[Dict[str, Any]]) -> Optional["AiResponseMetadata"]:
        """Create AiResponseMetadata from dict with camelCase field names.

        Returns None for None or empty input. snake_case keys are converted
        to camelCase for backward compatibility with older payloads.
        """
        if not data:
            return None

        # Convert snake_case keys to camelCase if needed (for backward compatibility)
        convertedData = {}
        for key, value in data.items():
            # Keep camelCase as-is, convert snake_case if present
            if '_' in key:
                # Convert snake_case to camelCase
                parts = key.split('_')
                camelKey = parts[0] + ''.join(word.capitalize() for word in parts[1:])
                convertedData[camelKey] = value
            else:
                convertedData[key] = value

        return cls(**convertedData)
+
+
class DocumentData(BaseModel):
    """Single document in response.

    Lightweight container used in AiResponse.documents for generated
    output (e.g. a generated image document).
    """
    # Document name, e.g. "generated_image.png"
    documentName: str = Field(description="Document name")
    # Payload; deliberately Any — producers supply str, bytes, dict, etc.
    documentData: Any = Field(description="Document data (can be str, bytes, dict, etc.)")
    # MIME type describing documentData, e.g. "image/png"
    mimeType: str = Field(description="MIME type of the document")
+
+
class ExtractContentParameters(BaseModel):
    """Parameters for extraction action.

    This model is defined together with the `methodAi.extractContent()` action function.
    All action parameter models follow this pattern: defined in the same module as the action.
    However, since this is a workflow-level model used across the system, it's defined here.
    """
    # Documents whose content should be extracted.
    documentList: DocumentReferenceList = Field(description="Document references to extract content from")
    # Typed as Any to avoid a circular import; at runtime this carries an
    # ExtractionOptions instance (see the TYPE_CHECKING import at file top).
    extractionOptions: Optional[Any] = Field(  # ExtractionOptions - forward reference
        None,
        description="Extraction options (determined dynamically based on task and document characteristics)"
    )
+
+
class AiResponse(BaseModel):
    """Unified response from all AI calls (planning, text, documents)"""

    content: str = Field(description="Response content (JSON string for planning, text for analysis, unified JSON for documents)")
    metadata: Optional[AiResponseMetadata] = Field(
        None,
        description="Response metadata (varies by operation type)"
    )
    documents: Optional[List[DocumentData]] = Field(
        None,
        description="Generated documents (only for document generation operations)"
    )

    def toJson(self) -> Dict[str, Any]:
        """
        Convert AI response content to JSON using enhanced stabilizing failsafe conversion methods.
        Centralizes AI result to JSON conversion in one place.

        Uses methods from jsonUtils:
        - tryParseJson() - Safe parsing with error handling
        - repairBrokenJson() - Repairs broken/incomplete JSON
        - extractJsonString() - Extracts JSON from text with code fences

        Returns:
            Dict containing the parsed JSON content, or a safe fallback structure if parsing fails.
            - If content is valid JSON dict: returns the dict directly
            - If content is valid JSON list: wraps in {"data": [...]}
            - If content is broken JSON: attempts repair using repairBrokenJson()
            - If all parsing fails: returns {"content": "...", "parseError": True}
        """
        # Defensive guards: content is annotated as str, but tolerate callers
        # that managed to hand us an already-parsed structure.
        if isinstance(self.content, dict):
            return self.content
        if isinstance(self.content, list):
            return {"data": self.content}

        # Normalize to string exactly once and reuse below (the original
        # recomputed this before the fallback return).
        contentStr = self.content if isinstance(self.content, str) else str(self.content)

        # First, try to extract JSON from text (handles code fences, etc.)
        extractedJson = extractJsonString(contentStr)

        # Try to parse as JSON (returns tuple: obj, error, cleaned_str)
        parsedJson, parseError, _ = tryParseJson(extractedJson)
        if parsedJson is not None and parseError is None:
            if isinstance(parsedJson, dict):
                return parsedJson
            if isinstance(parsedJson, list):
                return {"data": parsedJson}

        # Try to repair broken JSON.
        # repairBrokenJson returns Optional[Dict[str, Any]] - a dict or None,
        # so a single isinstance check covers both the None and success cases.
        repairedJson = repairBrokenJson(contentStr)
        if isinstance(repairedJson, dict):
            return repairedJson

        # All parsing failed - return safe fallback
        return {"content": contentStr, "parseError": True}
+
+
+# Workflow-level models
+
class RequestContext(BaseModel):
    """Normalized request context from user input.

    Flattens the raw user request into the flags and hints the workflow
    engine routes on (complexity, document/web-research/analysis needs).
    """

    # The user's prompt, verbatim.
    originalPrompt: str = Field(description="Original user prompt")
    # Typed as Any to avoid a circular import; at runtime these are
    # ChatDocument instances (see the TYPE_CHECKING import at file top).
    documents: List[Any] = Field(  # ChatDocument - forward reference
        default_factory=list,
        description="Documents provided by user"
    )
    userLanguage: str = Field(description="User's language")
    # Free-form string; expected values per description are
    # "simple" / "moderate" / "complex" (not enforced by the model).
    detectedComplexity: str = Field(
        description="Complexity level: simple, moderate, complex"
    )
    requiresDocuments: bool = Field(default=False, description="Whether request requires documents")
    requiresWebResearch: bool = Field(default=False, description="Whether request requires web research")
    requiresAnalysis: bool = Field(default=False, description="Whether request requires analysis")
    expectedOutputFormat: Optional[str] = Field(None, description="Expected output format")
    expectedOutputType: Optional[str] = Field(None, description="Expected output type: answer, document, analysis")
+
+
class UnderstandingResult(BaseModel):
    """Result from initial understanding phase (combined AI call).

    All sub-structures default to empty, so a partially-parsed AI answer
    still yields a valid (if sparse) result object.
    """

    parameters: Dict[str, Any] = Field(
        default_factory=dict,
        description="Basic parameters (language, format, detail level)"
    )
    intention: Dict[str, Any] = Field(
        default_factory=dict,
        description="User intention (primaryGoal, secondaryGoals, intentionType)"
    )
    context: Dict[str, Any] = Field(
        default_factory=dict,
        description="Extracted context (topics, requirements, constraints)"
    )
    documentReferences: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Document references with purpose and relevance"
    )
    # String annotation: TaskDefinition is declared later in this module;
    # pydantic resolves the reference lazily on first use.
    tasks: List["TaskDefinition"] = Field(  # Forward reference
        default_factory=list,
        description="Task definitions with deliverables"
    )
+
+
class TaskDefinition(BaseModel):
    """Task definition from understanding phase.

    Referenced by UnderstandingResult.tasks; TaskResult.taskId links back
    to `id`.
    """

    id: str = Field(description="Task identifier")
    objective: str = Field(description="Task objective")
    # No default: a deliverable specification is mandatory for every task.
    deliverable: Dict[str, Any] = Field(
        description="Deliverable specification (type, format, style, detailLevel)"
    )
    requiresWebResearch: bool = Field(default=False, description="Whether task requires web research")
    requiresDocumentAnalysis: bool = Field(default=False, description="Whether task requires document analysis")
    # Defaults to True — content generation is the common case for tasks.
    requiresContentGeneration: bool = Field(default=True, description="Whether task requires content generation")
    requiredDocuments: List[str] = Field(
        default_factory=list,
        description="Document references needed for this task"
    )
    # Typed as Any to avoid a circular import; at runtime this carries an
    # ExtractionOptions instance (see the TYPE_CHECKING import at file top).
    extractionOptions: Optional[Any] = Field(  # ExtractionOptions - forward reference
        None,
        description="Extraction options for document processing (determined dynamically based on task and document characteristics)"
    )
+
+
class TaskResult(BaseModel):
    """Result from task execution.

    Pairs a TaskDefinition.id with the result produced when the task ran.
    """

    # Matches TaskDefinition.id of the executed task.
    taskId: str = Field(description="Task identifier")
    # Typed as Any to avoid a circular import; at runtime this is an
    # ActionResult (see the TYPE_CHECKING import at file top).
    actionResult: Any = Field(description="ActionResult from task execution")  # ActionResult - forward reference
+
+
# Register model labels for UI.
# Data-driven form: one table mapping model name -> (model label, field labels),
# registered in a single loop. Registration order follows the table order.
_MODEL_LABELS = {
    "ActionDefinition": (
        {"en": "Action Definition", "fr": "Définition d'action"},
        {
            "action": {"en": "Action", "fr": "Action"},
            "actionObjective": {"en": "Action Objective", "fr": "Objectif de l'action"},
            "parametersContext": {"en": "Parameters Context", "fr": "Contexte des paramètres"},
            "learnings": {"en": "Learnings", "fr": "Apprentissages"},
            "documentList": {"en": "Document List", "fr": "Liste de documents"},
            "connectionReference": {"en": "Connection Reference", "fr": "Référence de connexion"},
            "parameters": {"en": "Parameters", "fr": "Paramètres"},
        },
    ),
    "AiResponse": (
        {"en": "AI Response", "fr": "Réponse IA"},
        {
            "content": {"en": "Content", "fr": "Contenu"},
            "metadata": {"en": "Metadata", "fr": "Métadonnées"},
            "documents": {"en": "Documents", "fr": "Documents"},
        },
    ),
    "AiResponseMetadata": (
        {"en": "AI Response Metadata", "fr": "Métadonnées de réponse IA"},
        {
            "title": {"en": "Title", "fr": "Titre"},
            "filename": {"en": "Filename", "fr": "Nom de fichier"},
            "operationType": {"en": "Operation Type", "fr": "Type d'opération"},
            "schemaVersion": {"en": "Schema Version", "fr": "Version du schéma"},
            "extractionMethod": {"en": "Extraction Method", "fr": "Méthode d'extraction"},
            "sourceDocuments": {"en": "Source Documents", "fr": "Documents sources"},
        },
    ),
    "DocumentData": (
        {"en": "Document Data", "fr": "Données de document"},
        {
            "documentName": {"en": "Document Name", "fr": "Nom du document"},
            "documentData": {"en": "Document Data", "fr": "Données du document"},
            "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
        },
    ),
    "RequestContext": (
        {"en": "Request Context", "fr": "Contexte de requête"},
        {
            "originalPrompt": {"en": "Original Prompt", "fr": "Invite originale"},
            "documents": {"en": "Documents", "fr": "Documents"},
            "userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
            "detectedComplexity": {"en": "Detected Complexity", "fr": "Complexité détectée"},
            "requiresDocuments": {"en": "Requires Documents", "fr": "Nécessite des documents"},
            "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
            "requiresAnalysis": {"en": "Requires Analysis", "fr": "Nécessite une analyse"},
        },
    ),
    "UnderstandingResult": (
        {"en": "Understanding Result", "fr": "Résultat de compréhension"},
        {
            "parameters": {"en": "Parameters", "fr": "Paramètres"},
            "intention": {"en": "Intention", "fr": "Intention"},
            "context": {"en": "Context", "fr": "Contexte"},
            "documentReferences": {"en": "Document References", "fr": "Références de documents"},
            "tasks": {"en": "Tasks", "fr": "Tâches"},
        },
    ),
    "TaskDefinition": (
        {"en": "Task Definition", "fr": "Définition de tâche"},
        {
            "id": {"en": "ID", "fr": "ID"},
            "objective": {"en": "Objective", "fr": "Objectif"},
            "deliverable": {"en": "Deliverable", "fr": "Livrable"},
            "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
            "requiresDocumentAnalysis": {"en": "Requires Document Analysis", "fr": "Nécessite une analyse de document"},
            "requiresContentGeneration": {"en": "Requires Content Generation", "fr": "Nécessite une génération de contenu"},
            "requiredDocuments": {"en": "Required Documents", "fr": "Documents requis"},
            "extractionOptions": {"en": "Extraction Options", "fr": "Options d'extraction"},
        },
    ),
    "TaskResult": (
        {"en": "Task Result", "fr": "Résultat de tâche"},
        {
            "taskId": {"en": "Task ID", "fr": "ID de tâche"},
            "actionResult": {"en": "Action Result", "fr": "Résultat d'action"},
        },
    ),
}

for _modelName, (_modelLabel, _fieldLabels) in _MODEL_LABELS.items():
    registerModelLabels(_modelName, _modelLabel, _fieldLabels)
+
diff --git a/modules/features/chatPlayground/mainChatPlayground.py b/modules/features/chatPlayground/mainChatPlayground.py
index fc148e56..7489608b 100644
--- a/modules/features/chatPlayground/mainChatPlayground.py
+++ b/modules/features/chatPlayground/mainChatPlayground.py
@@ -16,7 +16,7 @@ async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode
currentUser: Current user
userInput: User input request
workflowId: Optional workflow ID to continue existing workflow
- workflowMode: "Actionplan" for traditional task planning, "Dynamic" for iterative dynamic-style processing, "Template" for template-based processing
+ workflowMode: "Dynamic" for iterative dynamic-style processing, "Automation" for automated workflow execution
Example usage for Dynamic mode:
workflow = await chatStart(currentUser, userInput, workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC)
diff --git a/modules/routes/routeChatPlayground.py b/modules/routes/routeChatPlayground.py
index 8f23f7fd..3307ac7a 100644
--- a/modules/routes/routeChatPlayground.py
+++ b/modules/routes/routeChatPlayground.py
@@ -39,7 +39,7 @@ def getServiceChat(currentUser: User):
async def start_workflow(
request: Request,
workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"),
- workflowMode: WorkflowModeEnum = Query(..., description="Workflow mode: 'Actionplan', 'Dynamic', or 'Template' (mandatory)"),
+ workflowMode: WorkflowModeEnum = Query(..., description="Workflow mode: 'Dynamic' or 'Automation' (mandatory)"),
userInput: UserInputRequest = Body(...),
currentUser: User = Depends(getCurrentUser)
) -> ChatWorkflow:
@@ -48,7 +48,7 @@ async def start_workflow(
Corresponds to State 1 in the state machine documentation.
Args:
- workflowMode: "Actionplan" for traditional task planning, "Dynamic" for iterative dynamic-style processing, "Template" for template-based processing
+ workflowMode: "Dynamic" for iterative dynamic-style processing, "Automation" for automated workflow execution
"""
try:
# Start or continue workflow using playground controller
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
index e03b15cd..14218247 100644
--- a/modules/services/serviceAi/mainServiceAi.py
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -2,16 +2,19 @@ import json
import logging
import re
import time
-from typing import Dict, Any, List, Optional, Tuple, Union
+from typing import Dict, Any, List, Optional, Tuple
from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument
from modules.services.serviceExtraction.mainServiceExtraction import ExtractionService
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
+from modules.datamodels.datamodelExtraction import ContentPart
+from modules.datamodels.datamodelWorkflow import AiResponse, AiResponseMetadata, DocumentData
from modules.interfaces.interfaceAiObjects import AiObjects
from modules.shared.jsonUtils import (
extractJsonString,
repairBrokenJson,
extractSectionsFromDocument,
- buildContinuationContext
+ buildContinuationContext,
+ parseJsonWithModel
)
logger = logging.getLogger(__name__)
@@ -138,25 +141,11 @@ Respond with ONLY a JSON object in this exact format:
response = await self.aiObjects.call(request)
- # Parse AI response
+ # Parse AI response using structured parsing with AiCallOptions model
try:
- jsonStart = response.content.find('{')
- jsonEnd = response.content.rfind('}') + 1
- if jsonStart != -1 and jsonEnd > jsonStart:
- analysis = json.loads(response.content[jsonStart:jsonEnd])
-
- # Map string values to enums
- operationType = OperationTypeEnum(analysis.get('operationType', 'dataAnalyse'))
- priority = PriorityEnum(analysis.get('priority', 'balanced'))
- processingMode = ProcessingModeEnum(analysis.get('processingMode', 'basic'))
-
- return AiCallOptions(
- operationType=operationType,
- priority=priority,
- processingMode=processingMode,
- compressPrompt=analysis.get('compressPrompt', True),
- compressContext=analysis.get('compressContext', True)
- )
+ # Use parseJsonWithModel to parse response into AiCallOptions (handles enum conversion automatically)
+ analysis = parseJsonWithModel(response.content, AiCallOptions)
+ return analysis
except Exception as e:
logger.warning(f"Failed to parse AI analysis response: {e}")
@@ -258,12 +247,17 @@ Respond with ONLY a JSON object in this exact format:
else:
self.services.utils.writeDebugFile(result, f"{debugPrefix}_response_iteration_{iteration}")
- # Emit stats for this iteration
- self.services.chat.storeWorkflowStat(
- self.services.workflow,
- response,
- f"ai.call.{debugPrefix}.iteration_{iteration}"
- )
+ # Emit stats for this iteration (only if workflow exists and has id)
+ if self.services.workflow and hasattr(self.services.workflow, 'id') and self.services.workflow.id:
+ try:
+ self.services.chat.storeWorkflowStat(
+ self.services.workflow,
+ response,
+ f"ai.call.{debugPrefix}.iteration_{iteration}"
+ )
+ except Exception as statError:
+ # Don't break the main loop if stat storage fails
+ logger.warning(f"Failed to store workflow stat: {str(statError)}")
if not result or not result.strip():
logger.warning(f"Iteration {iteration}: Empty response, stopping")
@@ -502,7 +496,7 @@ Respond with ONLY a JSON object in this exact format:
Args:
prompt: The planning prompt
placeholders: Optional list of placeholder replacements
- debugType: Optional debug file type identifier (e.g., 'taskplan', 'actionplan', 'intentanalysis')
+ debugType: Optional debug file type identifier (e.g., 'taskplan', 'dynamic', 'intentanalysis')
If not provided, defaults to 'plan'
Returns:
@@ -541,60 +535,83 @@ Respond with ONLY a JSON object in this exact format:
self.services.utils.writeDebugFile(result, f"{debugPrefix}_response")
return result
- # Document Generation AI Call
- async def callAiDocuments(
+ async def callAiContent(
self,
prompt: str,
- documents: Optional[List[ChatDocument]] = None,
- options: Optional[AiCallOptions] = None,
+ options: AiCallOptions,
+ contentParts: Optional[List[ContentPart]] = None,
outputFormat: Optional[str] = None,
- title: Optional[str] = None
- ) -> Union[str, Dict[str, Any]]:
+ title: Optional[str] = None,
+ documents: Optional[List[ChatDocument]] = None # Phase 6: backward compatibility, Phase 7: remove
+ ) -> AiResponse:
"""
- Document generation AI call for all non-planning calls.
- Uses the current unified path with extraction and generation.
+ Unified AI content processing method (replaces callAiDocuments and callAiText).
Args:
prompt: The main prompt for the AI call
- documents: Optional list of documents to process
- options: AI call configuration options
- outputFormat: Optional output format for document generation
+ contentParts: Optional list of already-extracted content parts (preferred)
+ options: AI call configuration options (REQUIRED - operationType must be set)
+ outputFormat: Optional output format for document generation (e.g., 'pdf', 'docx', 'xlsx')
title: Optional title for generated documents
+ documents: Optional list of documents (Phase 6: backward compatibility - extracts internally)
Returns:
- AI response as string, or dict with documents if outputFormat is specified
+ AiResponse with content, metadata, and optional documents
"""
await self._ensureAiObjectsInitialized()
# Create separate operationId for detailed progress tracking
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
- aiOperationId = f"ai_documents_{workflowId}_{int(time.time())}"
+ aiOperationId = f"ai_content_{workflowId}_{int(time.time())}"
- # Start progress tracking for this operation
+ # Start progress tracking
self.services.chat.progressLogStart(
aiOperationId,
- "AI call with documents",
- "Document Generation",
+ "AI content processing",
+ "Content Processing",
f"Format: {outputFormat or 'text'}"
)
try:
- if options is None or (hasattr(options, 'operationType') and options.operationType is None):
- # Use AI to determine parameters ONLY when truly needed (options=None OR operationType=None)
- self.services.chat.progressLogUpdate(aiOperationId, 0.1, "Analyzing prompt parameters")
- options = await self._analyzePromptAndCreateOptions(prompt)
+ # Phase 7: Extraction is now separate - contentParts must be extracted before calling
+ # If documents parameter is still provided (backward compatibility), raise error
+ if documents and len(documents) > 0:
+ raise ValueError(
+ "callAiContent() no longer accepts 'documents' parameter. "
+ "Extract content first using 'ai.extractContent' action, then pass 'contentParts'."
+ )
- # Check operationType FIRST - some operations need direct routing (before document generation checks)
+ # Phase 6: Analyze prompt if operationType not set (backward compatibility)
+ # Phase 7: Require operationType to be set before calling
opType = getattr(options, "operationType", None)
+ if not opType:
+ # If outputFormat is specified, default to DATA_GENERATE
+ if outputFormat:
+ options.operationType = OperationTypeEnum.DATA_GENERATE
+ opType = OperationTypeEnum.DATA_GENERATE
+ else:
+ self.services.chat.progressLogUpdate(aiOperationId, 0.1, "Analyzing prompt parameters")
+ analyzedOptions = await self._analyzePromptAndCreateOptions(prompt)
+ if analyzedOptions and hasattr(analyzedOptions, "operationType") and analyzedOptions.operationType:
+ options.operationType = analyzedOptions.operationType
+ # Merge other analyzed options
+ if hasattr(analyzedOptions, "priority"):
+ options.priority = analyzedOptions.priority
+ if hasattr(analyzedOptions, "processingMode"):
+ options.processingMode = analyzedOptions.processingMode
+ if hasattr(analyzedOptions, "compressPrompt"):
+ options.compressPrompt = analyzedOptions.compressPrompt
+ if hasattr(analyzedOptions, "compressContext"):
+ options.compressContext = analyzedOptions.compressContext
+ else:
+ # Default to DATA_ANALYSE if analysis fails
+ options.operationType = OperationTypeEnum.DATA_ANALYSE
+ opType = options.operationType
- # Handle image generation requests directly via generic path
- isImageRequest = (opType == OperationTypeEnum.IMAGE_GENERATE)
-
- if isImageRequest:
- # Image generation uses generic call path but bypasses document generation pipeline
+ # Handle IMAGE_GENERATE operations
+ if opType == OperationTypeEnum.IMAGE_GENERATE:
self.services.chat.progressLogUpdate(aiOperationId, 0.4, "Calling AI for image generation")
- # Call via generic path (no looping for images)
request = AiCallRequest(
prompt=prompt,
context="",
@@ -603,62 +620,56 @@ Respond with ONLY a JSON object in this exact format:
response = await self.aiObjects.call(request)
- # Extract image data from response
if response.content:
- # For base64 format, return in expected format
- if outputFormat == "base64":
- result = {
- "success": True,
- "image_data": response.content,
- "documents": [{
- "documentName": "generated_image.png",
- "documentData": response.content,
- "mimeType": "image/png",
- "title": title or "Generated Image"
- }]
- }
- else:
- # Return raw content for other formats
- result = response.content
+ # Build document data for image
+ imageDoc = DocumentData(
+ documentName="generated_image.png",
+ documentData=response.content,
+ mimeType="image/png"
+ )
+
+ metadata = AiResponseMetadata(
+ title=title or "Generated Image",
+ operationType=opType.value
+ )
- # Emit stats for image generation
self.services.chat.storeWorkflowStat(
self.services.workflow,
response,
- f"ai.generate.image"
+ "ai.generate.image"
)
self.services.chat.progressLogUpdate(aiOperationId, 0.9, "Image generated")
self.services.chat.progressLogFinish(aiOperationId, True)
- return result
+
+ return AiResponse(
+ content=response.content,
+ metadata=metadata,
+ documents=[imageDoc]
+ )
else:
errorMsg = f"No image data returned: {response.content}"
logger.error(f"Error in AI image generation: {errorMsg}")
self.services.chat.progressLogFinish(aiOperationId, False)
- return {"success": False, "error": errorMsg}
+ raise ValueError(errorMsg)
- # Handle WEB_SEARCH and WEB_CRAWL operations - route directly to connectors
- # These operations require raw JSON prompts that connectors parse directly
- # Must check BEFORE document generation to avoid wrapping the prompt
- isWebOperation = (opType == OperationTypeEnum.WEB_SEARCH or opType == OperationTypeEnum.WEB_CRAWL)
-
- if isWebOperation:
- # Web operations: prompt is already structured JSON (AiCallPromptWebSearch/WebCrawl)
- # Route directly through centralized AI call - model selector chooses appropriate connector
- # Connector parses the JSON prompt and executes the operation
+ # Handle WEB_SEARCH and WEB_CRAWL operations
+ if opType == OperationTypeEnum.WEB_SEARCH or opType == OperationTypeEnum.WEB_CRAWL:
self.services.chat.progressLogUpdate(aiOperationId, 0.4, f"Calling AI for {opType.name}")
request = AiCallRequest(
- prompt=prompt, # Pass raw JSON prompt unchanged - connector will parse it
+ prompt=prompt, # Raw JSON prompt - connector will parse it
context="",
options=options
)
response = await self.aiObjects.call(request)
- # Extract result from response
if response.content:
- # Emit stats for web operation
+ metadata = AiResponseMetadata(
+ operationType=opType.value
+ )
+
self.services.chat.storeWorkflowStat(
self.services.workflow,
response,
@@ -667,42 +678,42 @@ Respond with ONLY a JSON object in this exact format:
self.services.chat.progressLogUpdate(aiOperationId, 0.9, f"{opType.name} completed")
self.services.chat.progressLogFinish(aiOperationId, True)
- return response.content
+
+ return AiResponse(
+ content=response.content,
+ metadata=metadata
+ )
else:
errorMsg = f"No content returned from {opType.name}: {response.content}"
logger.error(f"Error in {opType.name}: {errorMsg}")
self.services.chat.progressLogFinish(aiOperationId, False)
- return {"success": False, "error": errorMsg}
+ raise ValueError(errorMsg)
- # CRITICAL: For document generation with JSON templates, NEVER compress the prompt
- # Compressing would truncate the template structure and confuse the AI
- if outputFormat: # Document generation with structured output
- if not options:
- options = AiCallOptions()
- options.compressPrompt = False # JSON templates must NOT be truncated
- options.compressContext = False # Context also should not be compressed
-
- # Handle document generation with specific output format using unified approach
+ # Handle document generation (outputFormat specified)
if outputFormat:
- # Use unified generation method for all document generation
- if documents and len(documents) > 0:
- self.services.chat.progressLogUpdate(aiOperationId, 0.2, f"Extracting content from {len(documents)} documents")
- extracted_content = await self.callAiText(prompt, documents, options, aiOperationId)
+ # CRITICAL: For document generation with JSON templates, NEVER compress the prompt
+ options.compressPrompt = False
+ options.compressContext = False
+
+ # Convert contentParts to text for generation prompt (if provided)
+ if contentParts:
+ # Convert contentParts to text for generation prompt
+ content_for_generation = "\n\n".join([f"[{part.label}]\n{part.data}" for part in contentParts if part.data])
else:
- self.services.chat.progressLogUpdate(aiOperationId, 0.2, "Preparing for direct generation")
- extracted_content = None
+ content_for_generation = None
self.services.chat.progressLogUpdate(aiOperationId, 0.3, "Building generation prompt")
from modules.services.serviceGeneration.subPromptBuilderGeneration import buildGenerationPrompt
- # First call without continuation context
- generation_prompt = await buildGenerationPrompt(outputFormat, prompt, title, extracted_content, None)
- # Prepare prompt builder arguments for continuation
+ generation_prompt = await buildGenerationPrompt(
+ outputFormat, prompt, title, content_for_generation, None
+ )
+
promptArgs = {
"outputFormat": outputFormat,
"userPrompt": prompt,
"title": title,
- "extracted_content": extracted_content
+ "extracted_content": content_for_generation
}
self.services.chat.progressLogUpdate(aiOperationId, 0.4, "Calling AI for content generation")
@@ -714,64 +725,51 @@ Respond with ONLY a JSON object in this exact format:
promptArgs,
aiOperationId
)
-
+
self.services.chat.progressLogUpdate(aiOperationId, 0.7, "Parsing generated JSON")
- # Parse the generated JSON (extract fenced/embedded JSON first)
try:
extracted_json = self.services.utils.jsonExtractString(generated_json)
generated_data = json.loads(extracted_json)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse generated JSON: {str(e)}")
- logger.error(f"JSON content length: {len(generated_json)}")
- logger.error(f"JSON content preview (last 200 chars): ...{generated_json[-200:]}")
- logger.error(f"JSON content around error position: {generated_json[max(0, e.pos-50):e.pos+50]}")
-
- # Write the problematic JSON to debug file
self.services.utils.writeDebugFile(generated_json, "failed_json_parsing")
-
self.services.chat.progressLogFinish(aiOperationId, False)
- return {"success": False, "error": f"Generated content is not valid JSON: {str(e)}"}
+ raise ValueError(f"Generated content is not valid JSON: {str(e)}")
# Extract title and filename from generated document structure
- extractedTitle = title # Default to user-provided title
+ extractedTitle = title
extractedFilename = None
if isinstance(generated_data, dict) and "documents" in generated_data:
- documents = generated_data["documents"]
- if isinstance(documents, list) and len(documents) > 0:
- firstDoc = documents[0]
+ docs = generated_data["documents"]
+ if isinstance(docs, list) and len(docs) > 0:
+ firstDoc = docs[0]
if isinstance(firstDoc, dict):
- # Extract title from document (preferred over user-provided title)
if firstDoc.get("title"):
extractedTitle = firstDoc["title"]
- # Extract filename from document
if firstDoc.get("filename"):
extractedFilename = firstDoc["filename"]
- # Ensure metadata contains the extracted title for renderers
+ # Ensure metadata contains the extracted title
if "metadata" not in generated_data:
generated_data["metadata"] = {}
if extractedTitle:
generated_data["metadata"]["title"] = extractedTitle
self.services.chat.progressLogUpdate(aiOperationId, 0.8, f"Rendering to {outputFormat} format")
- # Render to final format using the existing renderer
try:
from modules.services.serviceGeneration.mainServiceGeneration import GenerationService
generationService = GenerationService(self.services)
- # Pass extracted title to renderer (will use metadata.title if available)
rendered_content, mime_type = await generationService.renderReport(
generated_data, outputFormat, extractedTitle or "Generated Document", prompt, self
)
- # Use extracted filename if available, otherwise generate from title or use generic
+ # Determine document name
if extractedFilename:
documentName = extractedFilename
elif extractedTitle and extractedTitle != "Generated Document":
- # Sanitize title for filename
sanitized = re.sub(r"[^a-zA-Z0-9._-]", "_", extractedTitle)
sanitized = re.sub(r"_+", "_", sanitized).strip("_")
if sanitized:
- # Ensure correct extension
if not sanitized.lower().endswith(f".{outputFormat}"):
documentName = f"{sanitized}.{outputFormat}"
else:
@@ -781,63 +779,68 @@ Respond with ONLY a JSON object in this exact format:
else:
documentName = f"generated.{outputFormat}"
- # Build result in the expected format
- result = {
- "success": True,
- "content": generated_data,
- "documents": [{
- "documentName": documentName,
- "documentData": rendered_content,
- "mimeType": mime_type,
- "title": extractedTitle or "Generated Document"
- }],
- "is_multi_file": False,
- "format": outputFormat,
- "title": extractedTitle or title,
- "split_strategy": "single",
- "total_documents": 1,
- "processed_documents": 1
- }
+ # Build document data
+ docData = DocumentData(
+ documentName=documentName,
+ documentData=rendered_content,
+ mimeType=mime_type
+ )
- # Log AI response for debugging
- self.services.utils.writeDebugFile(str(result), "document_generation_response", documents)
+ metadata = AiResponseMetadata(
+ title=extractedTitle or title or "Generated Document",
+ filename=extractedFilename,
+ operationType=opType.value if opType else None
+ )
+ self.services.utils.writeDebugFile(str(generated_data), "document_generation_response")
self.services.chat.progressLogFinish(aiOperationId, True)
- return result
-
+
+ return AiResponse(
+ content=json.dumps(generated_data),
+ metadata=metadata,
+ documents=[docData]
+ )
+
except Exception as e:
logger.error(f"Error rendering document: {str(e)}")
self.services.chat.progressLogFinish(aiOperationId, False)
- return {"success": False, "error": f"Rendering failed: {str(e)}"}
+ raise ValueError(f"Rendering failed: {str(e)}")
- # Handle text calls (no output format specified)
+ # Handle text processing (no outputFormat)
self.services.chat.progressLogUpdate(aiOperationId, 0.5, "Processing text call")
- if documents:
- # Use document processing for text calls with documents
- result = await self.callAiText(prompt, documents, options, aiOperationId)
+
+ if contentParts:
+ # Process contentParts through AI
+ # Convert contentParts to text for prompt
+ contentText = "\n\n".join([f"[{part.label}]\n{part.data}" for part in contentParts if part.data])
+ fullPrompt = f"{prompt}\n\n{contentText}" if contentText else prompt
+ result_content = await self._callAiWithLooping(
+ fullPrompt, options, "text", None, None, aiOperationId
+ )
else:
- # Use shared core function for direct text calls
- result = await self._callAiWithLooping(prompt, options, "text", None, None, aiOperationId)
+ # Direct text call (no documents to process)
+ result_content = await self._callAiWithLooping(
+ prompt, options, "text", None, None, aiOperationId
+ )
+
+ metadata = AiResponseMetadata(
+ operationType=opType.value if opType else None
+ )
self.services.chat.progressLogFinish(aiOperationId, True)
- return result
+
+ return AiResponse(
+ content=result_content,
+ metadata=metadata
+ )
except Exception as e:
- logger.error(f"Error in callAiDocuments: {str(e)}")
+ logger.error(f"Error in callAiContent: {str(e)}")
self.services.chat.progressLogFinish(aiOperationId, False)
raise
- async def callAiText(
- self,
- prompt: str,
- documents: Optional[List[ChatDocument]],
- options: AiCallOptions,
- operationId: Optional[str] = None
- ) -> str:
- """
- Handle text calls with document processing through ExtractionService.
- UNIFIED PROCESSING: Always use per-chunk processing for consistency.
- """
- await self._ensureAiObjectsInitialized()
- return await self.extractionService.processDocumentsPerChunk(documents, prompt, self.aiObjects, options, operationId)
+ # DEPRECATED METHODS REMOVED:
+ # - callAiDocuments() - replaced by callAiContent()
+ # - callAiText() - replaced by callAiContent()
+ # All call sites have been updated to use callAiContent()
diff --git a/modules/services/serviceChat/mainServiceChat.py b/modules/services/serviceChat/mainServiceChat.py
index a2c80a08..bab544ca 100644
--- a/modules/services/serviceChat/mainServiceChat.py
+++ b/modules/services/serviceChat/mainServiceChat.py
@@ -20,8 +20,24 @@ class ChatService:
self.interfaceDbApp = serviceCenter.interfaceDbApp
self._progressLogger = None
- def getChatDocumentsFromDocumentList(self, documentList: List[str]) -> List[ChatDocument]:
- """Get ChatDocuments from a list of document references using all three formats."""
+ def getChatDocumentsFromDocumentList(self, documentList) -> List[ChatDocument]:
+ """Get ChatDocuments from a DocumentReferenceList.
+
+ Args:
+ documentList: DocumentReferenceList (required)
+
+ Returns:
+ List[ChatDocument]: List of ChatDocument objects
+ """
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+
+ if not isinstance(documentList, DocumentReferenceList):
+ logger.error(f"getChatDocumentsFromDocumentList: Invalid documentList type: {type(documentList)}. Expected DocumentReferenceList.")
+ return []
+
+ # Convert to string list for processing
+ stringRefs = documentList.to_string_list()
+
try:
# Use self.services.workflow which is the ChatWorkflow object (stable during workflow execution)
workflow = self.services.workflow
@@ -31,7 +47,7 @@ class ChatService:
workflowId = workflow.id if hasattr(workflow, 'id') else 'NO_ID'
workflowObjId = id(workflow)
- logger.debug(f"getChatDocumentsFromDocumentList: input documentList = {documentList}")
+ logger.debug(f"getChatDocumentsFromDocumentList: input documentList = {stringRefs}")
logger.debug(f"getChatDocumentsFromDocumentList: using workflow.id = {workflowId}, workflow object id = {workflowObjId}")
# Root cause analysis: Verify workflow.messages integrity and detect workflow changes
@@ -72,7 +88,7 @@ class ChatService:
logger.debug(f"getChatDocumentsFromDocumentList: unable to enumerate messages for debug: {e}")
allDocuments = []
- for docRef in documentList:
+ for docRef in stringRefs:
if docRef.startswith("docItem:"):
# docItem:: - extract ID and find document
parts = docRef.split(':')
diff --git a/modules/services/serviceExtraction/mainServiceExtraction.py b/modules/services/serviceExtraction/mainServiceExtraction.py
index 4cc7702d..126c7ffd 100644
--- a/modules/services/serviceExtraction/mainServiceExtraction.py
+++ b/modules/services/serviceExtraction/mainServiceExtraction.py
@@ -8,15 +8,12 @@ from .subRegistry import ExtractorRegistry, ChunkerRegistry
from .subPipeline import runExtraction
from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart, MergeStrategy, ExtractionOptions, PartResult
from modules.datamodels.datamodelChat import ChatDocument
-from modules.datamodels.datamodelAi import AiCallResponse, AiCallRequest, AiCallOptions, OperationTypeEnum
+from modules.datamodels.datamodelAi import AiCallResponse, AiCallRequest, AiCallOptions
from modules.aicore.aicoreModelRegistry import modelRegistry
logger = logging.getLogger(__name__)
-# Rebuild ExtractionOptions to resolve forward references after all imports are complete
-ExtractionOptions.model_rebuild()
-
class ExtractionService:
def __init__(self, services: Optional[Any] = None):
@@ -443,12 +440,11 @@ class ExtractionService:
extractionOptions = ExtractionOptions(
prompt=prompt,
- operationType=options.operationType if options else OperationTypeEnum.DATA_EXTRACT,
processDocumentsIndividually=True,
mergeStrategy=mergeStrategy
)
- logger.debug(f"Per-chunk extraction options: prompt length={len(extractionOptions.prompt)} chars, operationType={extractionOptions.operationType}")
+ logger.debug(f"Per-chunk extraction options: prompt length={len(extractionOptions.prompt)} chars")
# Extract content WITHOUT chunking
if operationId:
diff --git a/modules/services/serviceGeneration/renderers/rendererImage.py b/modules/services/serviceGeneration/renderers/rendererImage.py
index 71ef41b1..ca51a73a 100644
--- a/modules/services/serviceGeneration/renderers/rendererImage.py
+++ b/modules/services/serviceGeneration/renderers/rendererImage.py
@@ -73,46 +73,34 @@ class RendererImage(BaseRenderer):
)
promptJson = promptModel.model_dump_json(exclude_none=True, indent=2)
- # Use generic path via callAiDocuments
+ # Use unified callAiContent method
options = AiCallOptions(
operationType=OperationTypeEnum.IMAGE_GENERATE,
resultFormat="base64"
)
- # Call via generic path
- imageResult = await aiService.callAiDocuments(
+ # Use unified callAiContent method
+ imageResponse = await aiService.callAiContent(
prompt=promptJson,
- documents=None,
options=options,
outputFormat="base64"
)
# Save image generation response to debug
- aiService.services.utils.writeDebugFile(str(imageResult), "image_generation_response")
+ aiService.services.utils.writeDebugFile(str(imageResponse.content), "image_generation_response")
- # Extract base64 image data from result
- # The generic path returns a dict with documents array for base64 format
- if isinstance(imageResult, dict):
- if imageResult.get("success", False):
- # Check if it's the new format with documents array
- documents = imageResult.get("documents", [])
- if documents and len(documents) > 0:
- imageData = documents[0].get("documentData", "")
- if imageData:
- return imageData
- # Fallback: check for image_data field
- imageData = imageResult.get("image_data", "")
- if imageData:
- return imageData
- raise ValueError("No image data returned from AI")
- else:
- errorMsg = imageResult.get("error", "Unknown error")
- raise ValueError(f"AI image generation failed: {errorMsg}")
- elif isinstance(imageResult, str):
- # If it's just a string, it might be base64 data directly
- return imageResult
- else:
- raise ValueError(f"Unexpected image generation result format: {type(imageResult)}")
+ # Extract base64 image data from AiResponse
+ # AiResponse.documents contains DocumentData objects
+ if imageResponse.documents and len(imageResponse.documents) > 0:
+ imageData = imageResponse.documents[0].documentData
+ if imageData:
+ return imageData
+
+ # Fallback: check content field (might be base64 string)
+ if imageResponse.content:
+ return imageResponse.content
+
+ raise ValueError("No image data returned from AI")
except Exception as e:
self.logger.error(f"Error generating AI image: {str(e)}")
diff --git a/modules/services/serviceWeb/mainServiceWeb.py b/modules/services/serviceWeb/mainServiceWeb.py
index be7609e8..3772e12e 100644
--- a/modules/services/serviceWeb/mainServiceWeb.py
+++ b/modules/services/serviceWeb/mainServiceWeb.py
@@ -234,13 +234,16 @@ Return ONLY valid JSON, no additional text:
resultFormat="json"
)
- searchResult = await self.services.ai.callAiDocuments(
+ # Use unified callAiContent method
+ searchResponse = await self.services.ai.callAiContent(
prompt=searchPrompt,
- documents=None,
options=searchOptions,
outputFormat="json"
)
+ # Extract content from AiResponse
+ searchResult = searchResponse.content
+
# Debug: persist search response
if isinstance(searchResult, str):
self.services.utils.writeDebugFile(searchResult, "websearch_response")
@@ -312,13 +315,16 @@ Return ONLY valid JSON, no additional text:
resultFormat="json"
)
- crawlResult = await self.services.ai.callAiDocuments(
+ # Use unified callAiContent method
+ crawlResponse = await self.services.ai.callAiContent(
prompt=crawlPrompt,
- documents=None,
options=crawlOptions,
outputFormat="json"
)
+ # Extract content from AiResponse
+ crawlResult = crawlResponse.content
+
# Debug: persist crawl response
if isinstance(crawlResult, str):
self.services.utils.writeDebugFile(crawlResult, "webcrawl_response")
diff --git a/modules/shared/jsonUtils.py b/modules/shared/jsonUtils.py
index 71b807b4..fe3dcc2d 100644
--- a/modules/shared/jsonUtils.py
+++ b/modules/shared/jsonUtils.py
@@ -1,9 +1,12 @@
import json
import logging
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union, Type, TypeVar
+from pydantic import BaseModel, ValidationError
logger = logging.getLogger(__name__)
+T = TypeVar('T', bound=BaseModel)
+
def stripCodeFences(text: str) -> str:
"""Remove ```json / ``` fences and surrounding whitespace if present."""
@@ -886,3 +889,79 @@ def buildContinuationContext(allSections: List[Dict[str, Any]], lastRawResponse:
return context
+
def _buildModelInstance(parsedData: Any, modelClass: Type[T]) -> T:
    """Create a *modelClass* instance from already-parsed JSON data.

    Accepts a dict (used directly) or a non-empty list (the first item is
    used). Shape errors are raised OUTSIDE the validation try/except so their
    messages are not re-wrapped into a misleading "failed to create instance"
    error.

    Args:
        parsedData: Result of a successful JSON parse (dict or list expected).
        modelClass: Pydantic model class to validate the data against.

    Returns:
        A populated instance of modelClass.

    Raises:
        ValueError: If the data shape is unusable or model validation fails.
    """
    if isinstance(parsedData, dict):
        payload = parsedData
    elif isinstance(parsedData, list):
        if not parsedData:
            raise ValueError(f"Empty list cannot be parsed as {modelClass.__name__}")
        payload = parsedData[0]
    else:
        raise ValueError(f"Parsed JSON is not a dict or list: {type(parsedData)}")

    try:
        return modelClass(**payload)
    except ValidationError as e:
        logger.error(f"Validation error parsing {modelClass.__name__}: {e}")
        raise ValueError(f"Invalid data for {modelClass.__name__}: {e}") from e
    except Exception as e:
        logger.error(f"Error creating {modelClass.__name__} instance: {e}")
        raise ValueError(f"Failed to create {modelClass.__name__} instance: {e}") from e


def parseJsonWithModel(jsonString: str, modelClass: Type[T]) -> T:
    """
    Parse a JSON string into a validated Pydantic model instance.

    Pipeline (uses existing jsonUtils helpers):
      1. extractJsonString() - extracts the JSON payload from surrounding
         text / code fences.
      2. tryParseJson()      - safe parsing with error reporting.
      3. repairBrokenJson()  - last-resort repair of broken/incomplete JSON.

    Args:
        jsonString: JSON string to parse (may contain code fences, extra text).
        modelClass: Pydantic model class to parse into.

    Returns:
        Parsed Pydantic model instance.

    Raises:
        ValueError: If JSON cannot be parsed, repaired, or validated.
    """
    if not jsonString:
        raise ValueError(f"Cannot parse empty JSON string for {modelClass.__name__}")

    # Step 1: Extract the JSON payload (handles code fences, extra text).
    extractedJson = extractJsonString(jsonString)
    if not extractedJson or not extractedJson.strip():
        raise ValueError(f"No JSON found in string for {modelClass.__name__}")

    # Step 2: Try a straight parse first.
    parsedJson, error, cleaned = tryParseJson(extractedJson)
    if error is None and parsedJson is not None:
        # Validation errors here are final: a structurally valid JSON that
        # fails model validation will not be "fixed" by JSON repair.
        return _buildModelInstance(parsedJson, modelClass)

    # Step 3: Attempt repair of broken/incomplete JSON.
    logger.warning(f"Initial JSON parsing failed, attempting repair for {modelClass.__name__}")
    repairedJson = repairBrokenJson(extractedJson)
    if repairedJson:
        # repairBrokenJson returns a Python object; round-trip through
        # tryParseJson to keep the original safe-parsing semantics.
        parsedRepaired, errorRepaired, _ = tryParseJson(json.dumps(repairedJson))
        if errorRepaired is None and parsedRepaired is not None:
            return _buildModelInstance(parsedRepaired, modelClass)

    # Step 4: All parsing attempts failed. Guard against cleaned being None.
    preview = (cleaned or extractedJson)[:200]
    logger.error(f"Failed to parse JSON for {modelClass.__name__}. Cleaned JSON preview: {preview}...")
    raise ValueError(
        f"Failed to parse or validate JSON for {modelClass.__name__}. "
        f"JSON may be malformed or incomplete."
    )
+
diff --git a/modules/workflows/methods/methodAi.py b/modules/workflows/methods/methodAi.py
index b1fd7cb6..ae9b7c98 100644
--- a/modules/workflows/methods/methodAi.py
+++ b/modules/workflows/methods/methodAi.py
@@ -9,8 +9,10 @@ from typing import Dict, Any, List, Optional
from datetime import datetime, UTC
from modules.workflows.methods.methodBase import MethodBase, action
-from modules.datamodels.datamodelChat import ActionResult
-from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, AiCallPromptImage
+from modules.datamodels.datamodelChat import ActionResult, ActionDocument
+from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
+from modules.datamodels.datamodelWorkflow import ExtractContentParameters
+from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy, ContentPart
logger = logging.getLogger(__name__)
@@ -60,9 +62,22 @@ class MethodAi(MethodBase):
# Update progress - preparing parameters
self.services.chat.progressLogUpdate(operationId, 0.2, "Preparing parameters")
- documentList = parameters.get("documentList", [])
- if isinstance(documentList, str):
- documentList = [documentList]
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+
+ documentListParam = parameters.get("documentList")
+ # Convert to DocumentReferenceList if needed
+ if documentListParam is None:
+ documentList = DocumentReferenceList(references=[])
+ elif isinstance(documentListParam, DocumentReferenceList):
+ documentList = documentListParam
+ elif isinstance(documentListParam, str):
+ documentList = DocumentReferenceList.from_string_list([documentListParam])
+ elif isinstance(documentListParam, list):
+ documentList = DocumentReferenceList.from_string_list(documentListParam)
+ else:
+ logger.error(f"Invalid documentList type: {type(documentListParam)}")
+ documentList = DocumentReferenceList(references=[])
+
resultType = parameters.get("resultType", "txt")
@@ -78,15 +93,53 @@ class MethodAi(MethodBase):
output_mime_type = "application/octet-stream" # Prefer service-provided mimeType when available
logger.info(f"Using result type: {resultType} -> {output_extension}")
- # Update progress - preparing documents
- self.services.chat.progressLogUpdate(operationId, 0.3, "Preparing documents")
+ # Phase 7.3: Extract content first if documents provided, then use contentParts
+ # Check if contentParts are already provided (preferred path)
+ contentParts: Optional[List[ContentPart]] = None
+ if "contentParts" in parameters:
+ contentParts = parameters.get("contentParts")
+ if contentParts and not isinstance(contentParts, list):
+ # Try to extract from ContentExtracted if it's an ActionDocument
+ if hasattr(contentParts, 'parts'):
+ contentParts = contentParts.parts
+ else:
+ logger.warning(f"Invalid contentParts type: {type(contentParts)}, treating as empty")
+ contentParts = None
- # Get ChatDocuments for AI service - let AI service handle all document processing
- chatDocuments = []
- if documentList:
+ # If contentParts not provided but documentList is, extract content first
+ if not contentParts and documentList.references:
+ self.services.chat.progressLogUpdate(operationId, 0.3, "Extracting content from documents")
+
+ # Get ChatDocuments
chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(documentList)
- if chatDocuments:
- logger.info(f"Prepared {len(chatDocuments)} documents for AI processing")
+ if not chatDocuments:
+ logger.warning("No documents found in documentList")
+ else:
+ logger.info(f"Extracting content from {len(chatDocuments)} documents")
+
+ # Prepare extraction options (use defaults if not provided)
+ extractionOptions = parameters.get("extractionOptions")
+ if not extractionOptions:
+ extractionOptions = ExtractionOptions(
+ prompt="Extract all content from the document",
+ mergeStrategy=MergeStrategy(
+ mergeType="concatenate",
+ groupBy="typeGroup",
+ orderBy="id"
+ ),
+ processDocumentsIndividually=True
+ )
+
+ # Extract content using extraction service
+ extractedResults = self.services.extraction.extractContent(chatDocuments, extractionOptions)
+
+ # Combine all ContentParts from all extracted results
+ contentParts = []
+ for extracted in extractedResults:
+ if extracted.parts:
+ contentParts.extend(extracted.parts)
+
+ logger.info(f"Extracted {len(contentParts)} content parts from {len(extractedResults)} documents")
# Update progress - preparing AI call
self.services.chat.progressLogUpdate(operationId, 0.4, "Preparing AI call")
@@ -101,10 +154,11 @@ class MethodAi(MethodBase):
# Update progress - calling AI
self.services.chat.progressLogUpdate(operationId, 0.6, "Calling AI")
- result = await self.services.ai.callAiDocuments(
+ # Use unified callAiContent method with contentParts (extraction is now separate)
+ aiResponse = await self.services.ai.callAiContent(
prompt=aiPrompt,
- documents=chatDocuments if chatDocuments else None,
options=options,
+ contentParts=contentParts, # Already extracted (or None if no documents)
outputFormat=output_format
)
@@ -113,26 +167,33 @@ class MethodAi(MethodBase):
from modules.datamodels.datamodelChat import ActionDocument
- if isinstance(result, dict) and isinstance(result.get("documents"), list):
+ # Extract documents from AiResponse
+ if aiResponse.documents and len(aiResponse.documents) > 0:
action_documents = []
- for d in result["documents"]:
+ for doc in aiResponse.documents:
action_documents.append(ActionDocument(
- documentName=d.get("documentName"),
- documentData=d.get("documentData"),
- mimeType=d.get("mimeType") or output_mime_type
+ documentName=doc.documentName,
+ documentData=doc.documentData,
+ mimeType=doc.mimeType or output_mime_type
))
# Preserve structured content field for validation (if it exists)
- # This allows validator to see the actual structured data, not just rendered output
- if "content" in result and result["content"] and isinstance(result["content"], (dict, list)):
- action_documents.append(ActionDocument(
- documentName="structured_content.json",
- documentData=result["content"],
- mimeType="application/json"
- ))
+ # Parse content JSON to check if it's structured data
+ try:
+ import json
+ contentData = json.loads(aiResponse.content) if isinstance(aiResponse.content, str) else aiResponse.content
+ if isinstance(contentData, (dict, list)):
+ action_documents.append(ActionDocument(
+ documentName="structured_content.json",
+ documentData=contentData,
+ mimeType="application/json"
+ ))
+ except:
+ pass # Content is not JSON, skip structured content
final_documents = action_documents
else:
+ # Text response - create document from content
extension = output_extension.lstrip('.')
meaningful_name = self._generateMeaningfulFileName(
base_name="ai",
@@ -141,7 +202,7 @@ class MethodAi(MethodBase):
)
action_document = ActionDocument(
documentName=meaningful_name,
- documentData=result,
+ documentData=aiResponse.content,
mimeType=output_mime_type
)
final_documents = [action_document]
@@ -165,6 +226,94 @@ class MethodAi(MethodBase):
)
+ @action
+ async def extractContent(self, parameters: ExtractContentParameters) -> ActionResult:
+ """
+ Extract content from documents (separate from AI calls).
+
+ This action performs pure content extraction without AI processing.
+ The extracted ContentParts can then be used by subsequent AI processing actions.
+
+ Parameters:
+ - documentList: DocumentReferenceList - Document references to extract content from
+ - extractionOptions: Optional[ExtractionOptions] - Extraction options (if not provided, defaults are used)
+
+ Returns:
+ - ActionResult with ActionDocument containing ContentExtracted objects
+ - ContentExtracted.parts contains List[ContentPart] (already chunked if needed)
+ """
+ try:
+ # Init progress logger
+ workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
+ operationId = f"ai_extract_{workflowId}_{int(time.time())}"
+
+ # Start progress tracking
+ self.services.chat.progressLogStart(
+ operationId,
+ "Extracting content from documents",
+ "Content Extraction",
+ f"Documents: {len(parameters.documentList.references) if parameters.documentList else 0}"
+ )
+
+ # Get ChatDocuments from documentList
+ self.services.chat.progressLogUpdate(operationId, 0.2, "Loading documents")
+ chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(parameters.documentList)
+
+ if not chatDocuments:
+ self.services.chat.progressLogFinish(operationId, False)
+ return ActionResult.isFailure(error="No documents found in documentList")
+
+ logger.info(f"Extracting content from {len(chatDocuments)} documents")
+
+ # Prepare extraction options
+ self.services.chat.progressLogUpdate(operationId, 0.3, "Preparing extraction options")
+ extractionOptions = parameters.extractionOptions
+
+ # If extractionOptions not provided, create defaults
+ if not extractionOptions:
+ # Default extraction options for pure content extraction (no AI processing)
+ extractionOptions = ExtractionOptions(
+ prompt="Extract all content from the document",
+ mergeStrategy=MergeStrategy(
+ mergeType="concatenate",
+ groupBy="typeGroup",
+ orderBy="id"
+ ),
+ processDocumentsIndividually=True
+ )
+
+ # Call extraction service
+ self.services.chat.progressLogUpdate(operationId, 0.5, f"Extracting content from {len(chatDocuments)} documents")
+ extractedResults = self.services.extraction.extractContent(chatDocuments, extractionOptions)
+
+ # Build ActionDocuments from ContentExtracted results
+ self.services.chat.progressLogUpdate(operationId, 0.8, "Building result documents")
+ actionDocuments = []
+ for extracted in extractedResults:
+ # Store ContentExtracted object in ActionDocument.documentData
+ actionDoc = ActionDocument(
+ documentName=f"extracted_{extracted.id}.json",
+ documentData=extracted, # ContentExtracted object
+ mimeType="application/json"
+ )
+ actionDocuments.append(actionDoc)
+
+ self.services.chat.progressLogFinish(operationId, True)
+
+ return ActionResult.isSuccess(documents=actionDocuments)
+
+ except Exception as e:
+ logger.error(f"Error in content extraction: {str(e)}")
+
+ # Complete progress tracking with failure
+ try:
+ self.services.chat.progressLogFinish(operationId, False)
+ except:
+ pass # Don't fail on progress logging errors
+
+ return ActionResult.isFailure(error=str(e))
+
+
@action
async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
"""
diff --git a/modules/workflows/methods/methodOutlook.py b/modules/workflows/methods/methodOutlook.py
index 4539452b..99768e07 100644
--- a/modules/workflows/methods/methodOutlook.py
+++ b/modules/workflows/methods/methodOutlook.py
@@ -1134,9 +1134,19 @@ class MethodOutlook(MethodBase):
return ActionResult.isFailure(error="Connection lacks necessary permissions for Outlook operations")
# Prepare documents for AI processing
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
chatDocuments = []
if documentList:
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(documentList)
+ # Convert to DocumentReferenceList if needed
+ if isinstance(documentList, DocumentReferenceList):
+ docRefList = documentList
+ elif isinstance(documentList, list):
+ docRefList = DocumentReferenceList.from_string_list(documentList)
+ elif isinstance(documentList, str):
+ docRefList = DocumentReferenceList.from_string_list([documentList])
+ else:
+ docRefList = DocumentReferenceList(references=[])
+ chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docRefList)
# Create AI prompt for email composition
# Build document reference list for AI with expanded list contents when possible
@@ -1146,7 +1156,8 @@ class MethodOutlook(MethodBase):
lines = ["Available_Document_References:"]
for ref in doc_references:
# Each item is a label: resolve to its document list and render contained items
- list_docs = self.services.chat.getChatDocumentsFromDocumentList([ref]) or []
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ list_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([ref])) or []
if list_docs:
for d in list_docs:
doc_ref_label = self.services.chat.getDocumentReferenceFromChatDocument(d)
@@ -1215,7 +1226,8 @@ Return JSON:
if documentList:
try:
available_refs = [documentList] if isinstance(documentList, str) else documentList
- available_docs = self.services.chat.getChatDocumentsFromDocumentList(available_refs) or []
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ available_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(available_refs)) or []
except Exception:
available_docs = []
@@ -1228,7 +1240,8 @@ Return JSON:
if ai_attachments:
try:
ai_refs = [ai_attachments] if isinstance(ai_attachments, str) else ai_attachments
- ai_docs = self.services.chat.getChatDocumentsFromDocumentList(ai_refs) or []
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ ai_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(ai_refs)) or []
except Exception:
ai_docs = []
@@ -1296,7 +1309,8 @@ Return JSON:
message["attachments"] = []
for attachment_ref in documentList:
# Get attachment document from service center
- attachment_docs = self.services.chat.getChatDocumentsFromDocumentList([attachment_ref])
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ attachment_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([attachment_ref]))
if attachment_docs:
for doc in attachment_docs:
file_id = getattr(doc, 'fileId', None)
@@ -1418,7 +1432,8 @@ Return JSON:
for docRef in documentList:
try:
# Get documents from document reference
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList([docRef])
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([docRef]))
if not chatDocuments:
logger.warning(f"No documents found for reference: {docRef}")
continue
diff --git a/modules/workflows/methods/methodSharepoint.py b/modules/workflows/methods/methodSharepoint.py
index b96b87d8..2c773989 100644
--- a/modules/workflows/methods/methodSharepoint.py
+++ b/modules/workflows/methods/methodSharepoint.py
@@ -1139,7 +1139,8 @@ class MethodSharepoint(MethodBase):
logger.debug(f"Both pathObject and pathQuery provided - using pathObject (pathQuery '{pathQuery}' will be ignored)")
try:
# Resolve the reference label to get the actual document list
- pathObjectDocuments = self.services.chat.getChatDocumentsFromDocumentList([pathObject])
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ pathObjectDocuments = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([pathObject]))
if not pathObjectDocuments or len(pathObjectDocuments) == 0:
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
@@ -1313,7 +1314,17 @@ class MethodSharepoint(MethodBase):
# Get documents from reference - ensure documentList is a list, not a string
# documentList is already normalized above
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(documentList)
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ # Convert to DocumentReferenceList if needed
+ if isinstance(documentList, DocumentReferenceList):
+ docRefList = documentList
+ elif isinstance(documentList, list):
+ docRefList = DocumentReferenceList.from_string_list(documentList)
+ elif isinstance(documentList, str):
+ docRefList = DocumentReferenceList.from_string_list([documentList])
+ else:
+ docRefList = DocumentReferenceList(references=[])
+ chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docRefList)
if not chatDocuments:
return ActionResult.isFailure(error="No documents found for the provided reference")
@@ -1553,7 +1564,8 @@ class MethodSharepoint(MethodBase):
if pathObject:
try:
# Resolve the reference label to get the actual document list
- documentList = self.services.chat.getChatDocumentsFromDocumentList([pathObject])
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ documentList = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([pathObject]))
if not documentList or len(documentList) == 0:
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
@@ -1654,7 +1666,17 @@ class MethodSharepoint(MethodBase):
# Get documents from reference - ensure documentList is a list, not a string
if isinstance(documentList, str):
documentList = [documentList] # Convert string to list
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(documentList)
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ # Convert to DocumentReferenceList if needed
+ if isinstance(documentList, DocumentReferenceList):
+ docRefList = documentList
+ elif isinstance(documentList, list):
+ docRefList = DocumentReferenceList.from_string_list(documentList)
+ elif isinstance(documentList, str):
+ docRefList = DocumentReferenceList.from_string_list([documentList])
+ else:
+ docRefList = DocumentReferenceList(references=[])
+ chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docRefList)
if not chatDocuments:
return ActionResult.isFailure(error="No documents found for the provided reference")
@@ -1959,7 +1981,8 @@ class MethodSharepoint(MethodBase):
logger.debug(f"Both pathObject and pathQuery provided - using pathObject (pathQuery '{pathQuery}' will be ignored)")
try:
# Resolve the reference label to get the actual document list
- documentList = self.services.chat.getChatDocumentsFromDocumentList([pathObject])
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ documentList = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([pathObject]))
if not documentList or len(documentList) == 0:
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
diff --git a/modules/workflows/processing/core/actionExecutor.py b/modules/workflows/processing/core/actionExecutor.py
index b3e740df..f9af58e7 100644
--- a/modules/workflows/processing/core/actionExecutor.py
+++ b/modules/workflows/processing/core/actionExecutor.py
@@ -52,16 +52,18 @@ class ActionExecutor:
logger.error(f"Error executing compound action {compoundActionName}: {str(e)}")
raise
- async def executeSingleAction(self, action: ActionItem, workflow: ChatWorkflow, taskStep: TaskStep,
- taskIndex: int = None, actionIndex: int = None, totalActions: int = None) -> ActionResult:
+ async def executeSingleAction(self, action: ActionItem, workflow: ChatWorkflow, taskStep: TaskStep) -> ActionResult:
"""Execute a single action and return ActionResult with enhanced document processing"""
try:
# Check workflow status before executing action
checkWorkflowStopped(self.services)
- # Use passed indices or fallback to '?'
- taskNum = taskIndex if taskIndex is not None else '?'
- actionNum = actionIndex if actionIndex is not None else '?'
+ # Get indices from workflow state
+ taskIndex = workflow.getTaskIndex()
+ actionIndex = workflow.getActionIndex()
+
+ taskNum = taskIndex
+ actionNum = actionIndex
logger.info(f"=== TASK {taskNum} ACTION {actionNum}: {action.execMethod}.{action.execAction} ===")
@@ -144,7 +146,7 @@ class ActionExecutor:
# Create database log entry for action failure (write-through + bind)
self.services.chat.storeLog(workflow, {
- "message": f"❌ **Task {taskNum}**❌ **Action {actionNum}/{totalActions}** failed: {result.error}",
+ "message": f"❌ **Task {taskNum}**❌ **Action {actionNum}** failed: {result.error}",
"type": "error",
"progress": 1.0
})
@@ -152,8 +154,11 @@ class ActionExecutor:
# Log action summary
logger.info(f"=== TASK {taskNum} ACTION {actionNum} COMPLETED ===")
+ # Increment action index in workflow
+ workflow.incrementAction()
+
# Create action completion message with documents (generic)
- await self._createActionCompletionMessage(action, result, workflow, taskStep, taskIndex, actionIndex, totalActions)
+ await self._createActionCompletionMessage(action, result, workflow, taskStep, taskIndex, actionIndex)
return ActionResult(
success=result.success,
@@ -186,7 +191,7 @@ class ActionExecutor:
return "\n\n---\n\n".join(resultParts) if resultParts else ""
async def _createActionCompletionMessage(self, action: ActionItem, result: ActionResult, workflow: ChatWorkflow,
- taskStep: TaskStep, taskIndex: int, actionIndex: int, totalActions: int):
+ taskStep: TaskStep, taskIndex: int, actionIndex: int):
"""Create action completion message with documents (generic)"""
try:
# Convert ActionDocument objects to ChatDocument objects for message creation
@@ -207,7 +212,7 @@ class ActionExecutor:
taskStep=taskStep,
taskIndex=taskIndex,
actionIndex=actionIndex,
- totalActions=totalActions
+ totalActions=None # Not needed - removed from signature
)
except Exception as e:
logger.error(f"Error creating action completion message: {str(e)}")
diff --git a/modules/workflows/processing/core/messageCreator.py b/modules/workflows/processing/core/messageCreator.py
index ddf32170..ee484572 100644
--- a/modules/workflows/processing/core/messageCreator.py
+++ b/modules/workflows/processing/core/messageCreator.py
@@ -59,14 +59,18 @@ class MessageCreator:
except Exception as e:
logger.error(f"Error creating task plan message: {str(e)}")
- async def createTaskStartMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, totalTasks: int):
+ async def createTaskStartMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, totalTasks: int = None):
"""Create a task start message for the user"""
try:
# Check workflow status before creating message
checkWorkflowStopped(self.services)
- # Create a task start message for the user
- taskProgress = f"{taskIndex}/{totalTasks}" if totalTasks is not None else str(taskIndex)
+ # Use workflow state if taskIndex not provided
+ if taskIndex is None:
+ taskIndex = workflow.getTaskIndex()
+
+ # Create a task start message for the user (totalTasks not needed - kept for backward compatibility)
+ taskProgress = str(taskIndex)
taskStartMessage = {
"workflowId": workflow.id,
"role": "assistant",
@@ -117,12 +121,11 @@ class MessageCreator:
# Create a more meaningful message that includes task context
taskObjective = taskStep.objective if taskStep else 'Unknown task'
- # Extract round, task, and action numbers from resultLabel first, then fallback to workflow context
- currentRound = self._extractRoundNumberFromLabel(resultLabel) if resultLabel else workflowContext.get('currentRound', 0)
- currentTask = self._extractTaskNumberFromLabel(resultLabel) if resultLabel else (taskIndex if taskIndex is not None else workflowContext.get('currentTask', 0))
- totalTasks = workflowStats.get('totalTasks', 0)
- currentAction = self._extractActionNumberFromLabel(resultLabel) if resultLabel else (actionIndex if actionIndex is not None else workflowContext.get('currentAction', 0))
- totalActions = totalActions if totalActions is not None else workflowStats.get('totalActions', 0)
+ # Extract round, task, and action numbers from resultLabel first, then fallback to workflow state
+ currentRound = self._extractRoundNumberFromLabel(resultLabel) if resultLabel else workflow.getRoundIndex()
+ currentTask = self._extractTaskNumberFromLabel(resultLabel) if resultLabel else (taskIndex if taskIndex is not None else workflow.getTaskIndex())
+ currentAction = self._extractActionNumberFromLabel(resultLabel) if resultLabel else (actionIndex if actionIndex is not None else workflow.getActionIndex())
+ # totalTasks and totalActions not needed - removed from architecture
# Debug logging for round number extraction
logger.info(f"Action message round number extraction: resultLabel='{resultLabel}', extractedRound={currentRound}, workflowRound={workflowContext.get('currentRound', 0)}")
@@ -183,13 +186,17 @@ class MessageCreator:
except Exception as e:
logger.error(f"Error creating action message: {str(e)}")
- async def createTaskCompletionMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, totalTasks: int, reviewResult: ReviewResult = None):
+ async def createTaskCompletionMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, totalTasks: int = None, reviewResult: ReviewResult = None):
"""Create a task completion message for the user"""
try:
# Check workflow status before creating message
checkWorkflowStopped(self.services)
- # Create a task completion message for the user
+ # Use workflow state if taskIndex not provided
+ if taskIndex is None:
+ taskIndex = workflow.getTaskIndex()
+
+ # Create a task completion message for the user (totalTasks not needed - kept for backward compatibility)
taskProgress = str(taskIndex)
# Enhanced completion message with criteria details
diff --git a/modules/workflows/processing/modes/modeActionplan.py b/modules/workflows/processing/modes/modeActionplan.py
deleted file mode 100644
index eee563e8..00000000
--- a/modules/workflows/processing/modes/modeActionplan.py
+++ /dev/null
@@ -1,811 +0,0 @@
-# modeActionplan.py
-# Actionplan mode implementation for workflows
-
-import json
-import logging
-import uuid
-from datetime import datetime, timezone
-from typing import List, Dict, Any
-from modules.datamodels.datamodelChat import (
- TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus,
- ActionResult, ReviewResult, ReviewContext
-)
-from modules.datamodels.datamodelChat import ChatWorkflow
-from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, ProcessingModeEnum, PriorityEnum
-from modules.workflows.processing.modes.modeBase import BaseMode
-from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
-from modules.shared.timeUtils import parseTimestamp
-from modules.workflows.processing.shared.executionState import TaskExecutionState
-from modules.workflows.processing.shared.promptGenerationActionsActionplan import (
- generateActionDefinitionPrompt,
- generateResultReviewPrompt
-)
-from modules.workflows.processing.adaptive import IntentAnalyzer, ContentValidator, LearningEngine, ProgressTracker
-from modules.workflows.processing.adaptive.adaptiveLearningEngine import AdaptiveLearningEngine
-
-logger = logging.getLogger(__name__)
-
-class ActionplanMode(BaseMode):
- """Actionplan mode implementation - batch planning and sequential execution"""
-
- def __init__(self, services):
- super().__init__(services)
- # Initialize adaptive components for enhanced validation and learning
- self.intentAnalyzer = IntentAnalyzer(services)
- self.learningEngine = LearningEngine()
- self.adaptiveLearningEngine = AdaptiveLearningEngine()
- self.contentValidator = ContentValidator(services, self.adaptiveLearningEngine)
- self.progressTracker = ProgressTracker()
- self.workflowIntent = None
- self.taskIntent = None
-
- async def generateActionItems(self, taskStep: TaskStep, workflow: ChatWorkflow,
- previousResults: List = None, enhancedContext: TaskContext = None) -> List[ActionItem]:
- """Generate actions for a given task step using batch planning approach"""
- try:
- # Check workflow status before generating actions
- checkWorkflowStopped(self.services)
-
- retryInfo = f" (Retry #{enhancedContext.retryCount})" if enhancedContext and enhancedContext.retryCount > 0 else ""
- logger.info(f"Generating actions for task: {taskStep.objective}{retryInfo}")
-
- # Log criteria progress if this is a retry
- if enhancedContext and hasattr(enhancedContext, 'criteriaProgress') and enhancedContext.criteriaProgress is not None:
- progress = enhancedContext.criteriaProgress
- logger.info(f"Retry attempt {enhancedContext.retryCount} - Criteria progress:")
- if progress.get('met_criteria'):
- logger.info(f" Met criteria: {', '.join(progress['met_criteria'])}")
- if progress.get('unmet_criteria'):
- logger.warning(f" Unmet criteria: {', '.join(progress['unmet_criteria'])}")
-
- # Show improvement trends
- if progress.get('attempt_history'):
- recentAttempts = progress['attempt_history'][-2:] # Last 2 attempts
- if len(recentAttempts) >= 2:
- prevScore = recentAttempts[0].get('quality_score', 0)
- currScore = recentAttempts[1].get('quality_score', 0)
- if currScore > prevScore:
- logger.info(f" Quality improving: {prevScore} -> {currScore}")
- elif currScore < prevScore:
- logger.warning(f" Quality declining: {prevScore} -> {currScore}")
- else:
- logger.info(f" Quality stable: {currScore}")
-
- # Enhanced retry context logging
- if enhancedContext and enhancedContext.retryCount > 0:
- logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
- logger.info(f"Retry Count: {enhancedContext.retryCount}")
- logger.debug(f"Previous Improvements: {enhancedContext.improvements}")
- logger.debug(f"Previous Review Result: {enhancedContext.previousReviewResult}")
- logger.debug(f"Failure Patterns: {enhancedContext.failurePatterns}")
- logger.debug(f"Failed Actions: {enhancedContext.failedActions}")
- logger.debug(f"Successful Actions: {enhancedContext.successfulActions}")
- logger.info("=== END RETRY CONTEXT ===")
-
- # Log that we're starting action generation
- logger.info("=== STARTING ACTION GENERATION ===")
-
- # Create proper context object for action definition
- if enhancedContext and isinstance(enhancedContext, TaskContext):
- # Use existing TaskContext if provided
- actionContext = TaskContext(
- taskStep=enhancedContext.taskStep,
- workflow=enhancedContext.workflow,
- workflowId=enhancedContext.workflowId,
- availableDocuments=enhancedContext.availableDocuments,
- availableConnections=enhancedContext.availableConnections,
- previousResults=enhancedContext.previousResults or previousResults or [],
- previousHandover=enhancedContext.previousHandover,
- improvements=enhancedContext.improvements or [],
- retryCount=enhancedContext.retryCount or 0,
- previousActionResults=enhancedContext.previousActionResults or [],
- previousReviewResult=enhancedContext.previousReviewResult,
- isRegeneration=enhancedContext.isRegeneration or False,
- failurePatterns=enhancedContext.failurePatterns or [],
- failedActions=enhancedContext.failedActions or [],
- successfulActions=enhancedContext.successfulActions or [],
- criteriaProgress=enhancedContext.criteriaProgress
- )
- else:
- # Create new context from scratch
- actionContext = TaskContext(
- taskStep=taskStep,
- workflow=workflow,
- workflowId=workflow.id,
- availableDocuments=None,
- availableConnections=None,
- previousResults=previousResults or [],
- previousHandover=None,
- improvements=[],
- retryCount=0,
- previousActionResults=[],
- previousReviewResult=None,
- isRegeneration=False,
- failurePatterns=[],
- failedActions=[],
- successfulActions=[],
- criteriaProgress=None
- )
-
- # Check workflow status before calling AI service
- checkWorkflowStopped(self.services)
-
- # Build prompt bundle (template + placeholders)
- bundle = generateActionDefinitionPrompt(self.services, actionContext)
- actionPromptTemplate = bundle.prompt
- placeholders = bundle.placeholders
-
-
- # Centralized AI call: Action planning (quality, detailed) with placeholders
- options = AiCallOptions(
- operationType=OperationTypeEnum.PLAN,
- priority=PriorityEnum.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingModeEnum.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
- prompt = await self.services.ai.callAiPlanning(
- prompt=actionPromptTemplate,
- placeholders=placeholders,
- debugType="actionplan"
- )
-
- # Check if AI response is valid
- if not prompt:
- raise ValueError("AI service returned no response")
-
- # Log action response received
- logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===")
- logger.info(f"Response length: {len(prompt) if prompt else 0}")
-
- # Parse action response
- jsonStart = prompt.find('{')
- jsonEnd = prompt.rfind('}') + 1
- if jsonStart == -1 or jsonEnd == 0:
- raise ValueError("No JSON found in response")
- jsonStr = prompt[jsonStart:jsonEnd]
-
- try:
- actionData = json.loads(jsonStr)
- except Exception as e:
- logger.error(f"Error parsing action response JSON: {str(e)}")
- actionData = {}
-
- if 'actions' not in actionData:
- raise ValueError("Action response missing 'actions' field")
-
- actions = actionData['actions']
- if not actions:
- raise ValueError("Action response contains empty actions list")
-
- if not isinstance(actions, list):
- raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")
-
- if not self.validator.validateAction(actions, actionContext):
- logger.error("Generated actions failed validation")
- raise Exception("AI-generated actions failed validation - AI is required for action generation")
-
- # Convert to ActionItem objects
- taskActions = []
- for i, a in enumerate(actions):
- if not isinstance(a, dict):
- logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
- continue
-
-
- # Handle compound action format (new) or separate method/action format (old)
- action_name = a.get('action', 'unknown')
- if '.' in action_name:
- # New compound action format: "method.action"
- method_name, action_name = action_name.split('.', 1)
- else:
- # Old separate format: method + action fields
- method_name = a.get('method', 'unknown')
-
- taskAction = self._createActionItem({
- "execMethod": method_name,
- "execAction": action_name,
- "execParameters": a.get('parameters', {}),
- "execResultLabel": a.get('resultLabel', ''),
- "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
- "status": TaskStatus.PENDING,
- # Extract user-friendly message if available
- "userMessage": a.get('userMessage', None)
- })
-
- if taskAction:
- taskActions.append(taskAction)
- else:
- logger.warning(f"Skipping invalid action {i+1}: failed to create ActionItem")
-
- validActions = [ta for ta in taskActions if ta]
-
- if not validActions:
- raise ValueError("No valid actions could be created from AI response")
-
- return validActions
- except Exception as e:
- logger.error(f"Error in generateActionItems: {str(e)}")
- return []
-
-
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
- taskIndex: int = None, totalTasks: int = None) -> TaskResult:
- """Execute all actions for a task step using Actionplan mode"""
- logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")
-
- # Use workflow-level intent from planning phase (stored in workflow object)
- # This avoids redundant intent analysis - intent was already analyzed during task planning
- if hasattr(workflow, '_workflowIntent') and workflow._workflowIntent:
- self.workflowIntent = workflow._workflowIntent
- logger.info(f"Using workflow intent from planning phase")
- else:
- # Fallback: analyze if not available (shouldn't happen in normal flow)
- originalPrompt = self.services.currentUserPrompt if self.services and hasattr(self.services, 'currentUserPrompt') else taskStep.objective
- self.workflowIntent = await self.intentAnalyzer.analyzeUserIntent(originalPrompt, context)
- logger.warning(f"Workflow intent not found in workflow object, analyzed fresh")
-
- # Task-level intent is NOT needed - use task.objective + task format fields (dataType, expectedFormats, qualityRequirements)
- # These format fields are populated from workflow intent during task planning
- self.taskIntent = None # Removed redundant task-level intent analysis
- logger.info(f"Workflow intent: {self.workflowIntent}")
- if taskStep.dataType or taskStep.expectedFormats or taskStep.qualityRequirements:
- logger.info(f"Task format info: dataType={taskStep.dataType}, expectedFormats={taskStep.expectedFormats}")
-
- # Reset progress tracking for new task
- self.progressTracker.reset()
-
- # Update workflow object before executing task
- if taskIndex is not None:
- self._updateWorkflowBeforeExecutingTask(taskIndex)
-
- # Update workflow context for this task
- if taskIndex is not None:
- self.services.chat.setWorkflowContext(taskNumber=taskIndex)
-
- # Create task start message
- await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, totalTasks)
-
- state = TaskExecutionState(taskStep)
- retryContext = context
- maxRetries = state.max_retries
-
- for attempt in range(maxRetries):
- logger.info(f"Task execution attempt {attempt+1}/{maxRetries}")
-
- # Check workflow status before starting task execution
- checkWorkflowStopped(self.services)
-
- # Update retry context with current attempt information
- if retryContext:
- retryContext.retryCount = attempt + 1
-
- actions = await self.generateActionItems(taskStep, workflow,
- previousResults=retryContext.previousResults,
- enhancedContext=retryContext)
-
- # Log total actions count for this task
- totalActions = len(actions) if actions else 0
- logger.info(f"Task {taskIndex or '?'} has {totalActions} actions")
-
- # Update workflow object after action planning
- self._updateWorkflowAfterActionPlanning(totalActions)
- self._setWorkflowTotals(totalActions=totalActions)
-
- if not actions:
- logger.error("No actions defined for task step, aborting task execution")
- break
-
- actionResults = []
- for actionIdx, action in enumerate(actions):
- # Check workflow status before each action execution
- checkWorkflowStopped(self.services)
-
- # Update workflow object before executing action
- actionNumber = actionIdx + 1
- self._updateWorkflowBeforeExecutingAction(actionNumber)
-
-
- # Log action start
- logger.info(f"Task {taskIndex} - Starting action {actionNumber}/{totalActions}")
-
- # Create action start message
- actionStartMessage = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": f"⚡ **Action {actionNumber}** (Method {action.execMethod}.{action.execAction})",
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.timestampGetUtc(),
- "documentsLabel": f"action_{actionNumber}_start",
- "documents": [],
- "actionProgress": "running",
- "roundNumber": workflow.currentRound,
- "taskNumber": taskIndex,
- "actionNumber": actionNumber
- }
-
- # Add user-friendly message if available
- if action.userMessage:
- actionStartMessage["message"] += f"\n\n💬 {action.userMessage}"
-
- self.services.chat.storeMessageWithDocuments(workflow, actionStartMessage, [])
- logger.info(f"Action start message created for action {actionNumber}")
-
- # Execute single action
- result = await self.actionExecutor.executeSingleAction(action, workflow, taskStep,
- taskIndex, actionNumber, totalActions)
- actionResults.append(result)
-
- # Enhanced validation: Content validation after each action (like Dynamic mode)
- if getattr(self, 'workflowIntent', None) and result.documents:
- # Pass ALL documents to validator - validator decides what to validate (generic approach)
- # Pass taskStep so validator can use task.objective and format fields
- # Pass action name so validator knows which action created the documents
- actionName = f"{action.execMethod}.{action.execAction}"
- validationResult = await self.contentValidator.validateContent(result.documents, self.workflowIntent, taskStep, actionName)
- qualityScore = validationResult.get('qualityScore', 0.0)
- if qualityScore is None:
- qualityScore = 0.0
- logger.info(f"Content validation for action {actionNumber}: {validationResult['overallSuccess']} (quality: {qualityScore:.2f})")
-
- # Record validation result for adaptive learning
- actionContext = {
- 'actionName': f"{action.execMethod}.{action.execAction}",
- 'workflowId': context.workflowId
- }
-
- self.adaptiveLearningEngine.recordValidationResult(
- validationResult,
- actionContext,
- context.workflowId,
- actionNumber
- )
-
- # Learn from feedback
- feedback = self._collectFeedback(result, validationResult, self.workflowIntent)
- self.learningEngine.learnFromFeedback(feedback, context, self.workflowIntent)
-
- # Update progress
- self.progressTracker.updateOperation(result, validationResult, self.workflowIntent)
-
- if result.success:
- state.addSuccessfulAction(result)
- else:
- state.addFailedAction(result)
-
- # Check workflow status before review
- checkWorkflowStopped(self.services)
-
- reviewResult = await self._reviewTaskCompletion(taskStep, actions, actionResults, workflow)
- success = reviewResult.status == 'success'
- feedback = reviewResult.reason
- error = None if success else reviewResult.reason
-
- if success:
- logger.info(f"=== TASK {taskIndex or '?'} COMPLETED SUCCESSFULLY: {taskStep.objective} ===")
-
- # Create task completion message
- await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, totalTasks, reviewResult)
-
- return TaskResult(
- taskId=taskStep.id,
- status=TaskStatus.COMPLETED,
- success=True,
- feedback=feedback,
- error=None
- )
-
- elif reviewResult.status == 'retry' and state.canRetry():
- logger.warning(f"Task step '{taskStep.objective}' requires retry: {reviewResult.improvements}")
-
- # Enhanced logging of criteria status
- if reviewResult.metCriteria:
- logger.info(f"Met criteria: {', '.join(reviewResult.metCriteria)}")
- if reviewResult.unmetCriteria:
- logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmetCriteria)}")
-
- state.incrementRetryCount()
-
- # Update retry context with retry information and criteria tracking
- if retryContext:
- retryContext.retryCount = state.retry_count
- retryContext.improvements = reviewResult.improvements
- retryContext.previousActionResults = actionResults
- retryContext.previousReviewResult = reviewResult
- retryContext.isRegeneration = True
- retryContext.failurePatterns = state.getFailurePatterns()
- retryContext.failedActions = state.failed_actions
- retryContext.successfulActions = state.successful_actions
-
- # Track criteria progress across retries
- if not hasattr(retryContext, 'criteriaProgress'):
- retryContext.criteriaProgress = {
- 'met_criteria': set(),
- 'unmet_criteria': set(),
- 'attempt_history': []
- }
-
- # Update criteria progress
- if reviewResult.metCriteria:
- retryContext.criteriaProgress['met_criteria'].update(reviewResult.metCriteria)
- if reviewResult.unmetCriteria:
- retryContext.criteriaProgress['unmet_criteria'].update(reviewResult.unmetCriteria)
-
- # Record this attempt's criteria status
- attemptRecord = {
- 'attempt': state.retry_count,
- 'met_criteria': reviewResult.metCriteria or [],
- 'unmet_criteria': reviewResult.unmetCriteria or [],
- 'quality_score': reviewResult.qualityScore,
- 'improvements': reviewResult.improvements or []
- }
- retryContext.criteriaProgress['attempt_history'].append(attemptRecord)
-
- # Create retry message
- await self.messageCreator.createRetryMessage(taskStep, workflow, taskIndex, reviewResult)
-
- continue
- else:
- logger.error(f"=== TASK {taskIndex or '?'} FAILED: {taskStep.objective} after {attempt+1} attempts ===")
-
- # Create error message
- await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, reviewResult.reason)
-
- return TaskResult(
- taskId=taskStep.id,
- status=TaskStatus.FAILED,
- success=False,
- feedback=feedback,
- error=reviewResult.reason if reviewResult and hasattr(reviewResult, 'reason') else "Task failed after retry attempts"
- )
-
- logger.error(f"=== TASK {taskIndex or '?'} FAILED AFTER ALL RETRIES: {taskStep.objective} ===")
-
- # Create final error message
- await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, "Task failed after all retries")
-
- return TaskResult(
- taskId=taskStep.id,
- status=TaskStatus.FAILED,
- success=False,
- feedback="Task failed after all retries.",
- error="Task failed after all retries."
- )
-
- async def _reviewTaskCompletion(self, taskStep: TaskStep, taskActions: List[ActionItem],
- actionResults: List[ActionResult], workflow: ChatWorkflow) -> ReviewResult:
- """Review task completion and determine success/failure/retry"""
- try:
- # Check workflow status before reviewing task completion
- checkWorkflowStopped(self.services)
-
- logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
- logger.info(f"Task: {taskStep.objective}")
- logger.info(f"Actions executed: {len(taskActions) if taskActions else 0}")
- logger.info(f"Action results: {len(actionResults) if actionResults else 0}")
-
- # Create proper context object for result review
- reviewContext = ReviewContext(
- taskStep=taskStep,
- taskActions=taskActions,
- actionResults=actionResults,
- stepResult={
- 'successful_actions': sum(1 for result in actionResults if result.success),
- 'total_actions': len(actionResults),
- 'results': [self._extractResultText(result) for result in actionResults if result.success],
- 'errors': [result.error for result in actionResults if not result.success],
- 'documents': [
- {
- 'action_index': i,
- 'documents_count': len(result.documents) if result.documents else 0,
- 'documents': result.documents if result.documents else []
- }
- for i, result in enumerate(actionResults)
- ]
- },
- workflowId=workflow.id,
- previousResults=[]
- )
-
- # Check workflow status before calling AI service
- checkWorkflowStopped(self.services)
-
- # Build prompt bundle for result review
- bundle = generateResultReviewPrompt(reviewContext)
- promptTemplate = bundle.prompt
- placeholders = bundle.placeholders
-
- # Log result review prompt sent to AI
- logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
- logger.info(f"Task: {taskStep.objective}")
- logger.info(f"Action Results Count: {len(reviewContext.actionResults) if reviewContext.actionResults else 0}")
- logger.info(f"Task Actions Count: {len(reviewContext.taskActions) if reviewContext.taskActions else 0}")
-
- # Centralized AI call: Result validation (balanced analysis) with placeholders
- options = AiCallOptions(
- operationType=OperationTypeEnum.DATA_ANALYSE,
- priority=PriorityEnum.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingModeEnum.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30
- )
-
- response = await self.services.ai.callAiPlanning(
- prompt=promptTemplate,
- placeholders=placeholders,
- debugType="resultreview"
- )
-
- # Log result review response received
- logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
- logger.info(f"Response length: {len(response) if response else 0}")
-
- # Parse review response
- jsonStart = response.find('{')
- jsonEnd = response.rfind('}') + 1
- if jsonStart == -1 or jsonEnd == 0:
- raise ValueError("No JSON found in review response")
- jsonStr = response[jsonStart:jsonEnd]
-
- try:
- review = json.loads(jsonStr)
- except Exception as e:
- logger.error(f"Error parsing review response JSON: {str(e)}")
- review = {}
- if 'status' not in review:
- raise ValueError("Review response missing 'status' field")
- review.setdefault('status', 'unknown')
- review.setdefault('reason', 'No reason provided')
- review.setdefault('quality_score', 5.0)
-
- # Ensure improvements is a list
- improvements = review.get('improvements', [])
- if isinstance(improvements, str):
- # Split string into list if it's a single improvement
- improvements = [improvements.strip()] if improvements.strip() else []
- elif not isinstance(improvements, list):
- improvements = []
-
- # Ensure all list fields are properly typed
- metCriteria = review.get('met_criteria', [])
- if not isinstance(metCriteria, list):
- metCriteria = []
-
- unmetCriteria = review.get('unmet_criteria', [])
- if not isinstance(unmetCriteria, list):
- unmetCriteria = []
-
- reviewResult = ReviewResult(
- status=review.get('status', 'unknown'),
- reason=review.get('reason', 'No reason provided'),
- improvements=improvements,
- qualityScore=float(review.get('quality_score', review.get('qualityScore', 5.0))),
- missingOutputs=[],
- metCriteria=metCriteria,
- unmetCriteria=unmetCriteria,
- confidence=review.get('confidence', 0.5),
- # Extract user-friendly message if available
- userMessage=review.get('userMessage', None)
- )
-
- # Enhanced validation logging
- logger.info(f"VALIDATION RESULT - Task: '{taskStep.objective}' - Status: {reviewResult.status.upper()}, Quality: {reviewResult.qualityScore}/10")
- if reviewResult.status == 'success':
- logger.info(f"VALIDATION SUCCESS - Task completed successfully")
- if reviewResult.metCriteria:
- logger.info(f"Met criteria: {', '.join(reviewResult.metCriteria)}")
- elif reviewResult.status == 'retry':
- logger.warning(f"VALIDATION RETRY - Task requires retry: {reviewResult.improvements}")
- if reviewResult.unmetCriteria:
- logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmetCriteria)}")
- else:
- logger.error(f"VALIDATION FAILED - Task failed: {reviewResult.reason}")
-
- logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
- logger.info(f"Final Status: {reviewResult.status}")
- logger.info(f"Quality Score: {reviewResult.qualityScore}/10")
- logger.info(f"Improvements: {reviewResult.improvements}")
- logger.info("=== END REVIEW ===")
-
- return reviewResult
- except Exception as e:
- logger.error(f"Error in reviewTaskCompletion: {str(e)}")
- return ReviewResult(
- status='failed',
- reason=str(e),
- qualityScore=0.0
- )
-
- def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
- """Creates a new task action"""
- try:
- # Ensure ID is present
- if "id" not in actionData or not actionData["id"]:
- actionData["id"] = f"action_{uuid.uuid4()}"
-
- # Ensure required fields
- if "status" not in actionData:
- actionData["status"] = TaskStatus.PENDING
-
- if "execMethod" not in actionData:
- logger.error("execMethod is required for task action")
- return None
-
- if "execAction" not in actionData:
- logger.error("execAction is required for task action")
- return None
-
- if "execParameters" not in actionData:
- actionData["execParameters"] = {}
-
- # Use generic field separation based on ActionItem model
- simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
-
- # Create action in database
- createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
-
- # Convert to ActionItem model
- return ActionItem(
- id=createdAction["id"],
- execMethod=createdAction["execMethod"],
- execAction=createdAction["execAction"],
- execParameters=createdAction.get("execParameters", {}),
- execResultLabel=createdAction.get("execResultLabel"),
- expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
- status=createdAction.get("status", TaskStatus.PENDING),
- error=createdAction.get("error"),
- retryCount=createdAction.get("retryCount", 0),
- retryMax=createdAction.get("retryMax", 3),
- processingTime=createdAction.get("processingTime"),
- timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
- result=createdAction.get("result"),
- resultDocuments=createdAction.get("resultDocuments", []),
- userMessage=createdAction.get("userMessage")
- )
-
- except Exception as e:
- logger.error(f"Error creating task action: {str(e)}")
- return None
-
- def _extractResultText(self, result: ActionResult) -> str:
- """Extract result text from ActionResult documents"""
- if not result.success or not result.documents:
- return ""
-
- # Extract text directly from ActionDocument objects
- resultParts = []
- for doc in result.documents:
- if hasattr(doc, 'documentData') and doc.documentData:
- resultParts.append(str(doc.documentData))
-
- # Join all document results with separators
- return "\n\n---\n\n".join(resultParts) if resultParts else ""
-
- def _collectFeedback(self, result: Any, validation: Dict[str, Any], intent: Dict[str, Any]) -> Dict[str, Any]:
- """Collects comprehensive feedback from action execution"""
- try:
- # Extract content summary
- contentDelivered = ""
- if result.documents:
- firstDoc = result.documents[0]
- if hasattr(firstDoc, 'documentData'):
- data = firstDoc.documentData
- if isinstance(data, dict) and 'content' in data:
- content = str(data['content'])
- contentDelivered = content[:100] + "..." if len(content) > 100 else content
- else:
- contentDelivered = str(data)[:100] + "..." if len(str(data)) > 100 else str(data)
-
- return {
- "actionAttempted": result.resultLabel or "unknown",
- "parametersUsed": {}, # Would be extracted from action context
- "contentDelivered": contentDelivered,
- "intentMatchScore": validation.get('qualityScore', 0),
- "qualityScore": validation.get('qualityScore', 0),
- "issuesFound": validation.get('improvementSuggestions', []),
- "learningOpportunities": validation.get('improvementSuggestions', []),
- "userSatisfaction": None, # Would be collected from user feedback
- "timestamp": datetime.now(timezone.utc).timestamp()
- }
-
- except Exception as e:
- logger.error(f"Error collecting feedback: {str(e)}")
- return {
- "actionAttempted": "unknown",
- "parametersUsed": {},
- "contentDelivered": "",
- "intentMatchScore": 0,
- "qualityScore": 0,
- "issuesFound": [],
- "learningOpportunities": [],
- "userSatisfaction": None,
- "timestamp": datetime.now(timezone.utc).timestamp()
- }
-
- def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
- """Update workflow object before executing a task"""
- try:
- workflow = self.services.workflow
- updateData = {
- "currentTask": taskNumber,
- "currentAction": 0,
- "totalActions": 0
- }
-
- # Update workflow object
- workflow.currentTask = taskNumber
- workflow.currentAction = 0
- workflow.totalActions = 0
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}: {updateData}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing task: {str(e)}")
-
- def _updateWorkflowAfterActionPlanning(self, totalActions: int):
- """Update workflow object after action planning for current task"""
- try:
- workflow = self.services.workflow
- updateData = {
- "totalActions": totalActions
- }
-
- # Update workflow object
- workflow.totalActions = totalActions
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} after action planning: {updateData}")
-
- except Exception as e:
- logger.error(f"Error updating workflow after action planning: {str(e)}")
-
- def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
- """Update workflow object before executing an action"""
- try:
- workflow = self.services.workflow
- updateData = {
- "currentAction": actionNumber
- }
-
- # Update workflow object
- workflow.currentAction = actionNumber
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}: {updateData}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing action: {str(e)}")
-
- def _setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
- """Set total counts for workflow progress tracking and update database"""
- try:
- workflow = self.services.workflow
- updateData = {}
-
- if totalTasks is not None:
- workflow.totalTasks = totalTasks
- updateData["totalTasks"] = totalTasks
-
- if totalActions is not None:
- workflow.totalActions = totalActions
- updateData["totalActions"] = totalActions
-
- # Update workflow object in database if we have changes
- if updateData:
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} totals in database: {updateData}")
-
- logger.debug(f"Updated workflow totals: Tasks {workflow.totalTasks if hasattr(workflow, 'totalTasks') else 'N/A'}, Actions {workflow.totalActions if hasattr(workflow, 'totalActions') else 'N/A'}")
- except Exception as e:
- logger.error(f"Error setting workflow totals: {str(e)}")
-
diff --git a/modules/workflows/processing/modes/modeAutomation.py b/modules/workflows/processing/modes/modeAutomation.py
index 43a81db6..b8600e0f 100644
--- a/modules/workflows/processing/modes/modeAutomation.py
+++ b/modules/workflows/processing/modes/modeAutomation.py
@@ -166,8 +166,8 @@ class AutomationMode(BaseMode):
async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
taskIndex: int = None, totalTasks: int = None) -> TaskResult:
"""
- Execute task using Template mode - executes predefined actions directly.
- Similar to ActionplanMode but without AI planning or review phases.
+ Execute task using Automation mode - executes predefined actions directly.
+ No AI planning or review phases - actions are executed sequentially as defined.
"""
logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")
diff --git a/modules/workflows/processing/modes/modeBase.py b/modules/workflows/processing/modes/modeBase.py
index b1e3d062..8fbb627f 100644
--- a/modules/workflows/processing/modes/modeBase.py
+++ b/modules/workflows/processing/modes/modeBase.py
@@ -25,8 +25,7 @@ class BaseMode(ABC):
@abstractmethod
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
- taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
"""Execute a task step - must be implemented by concrete modes"""
pass
diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py
index 94c04558..cd4149d2 100644
--- a/modules/workflows/processing/modes/modeDynamic.py
+++ b/modules/workflows/processing/modes/modeDynamic.py
@@ -47,10 +47,13 @@ class DynamicMode(BaseMode):
# Dynamic mode generates actions one at a time in the execution loop
return []
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
- taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
"""Execute task using Dynamic mode - iterative plan-act-observe-refine loop"""
- logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")
+
+ # Get task index from workflow state
+ taskIndex = workflow.getTaskIndex()
+
+ logger.info(f"=== STARTING TASK {taskIndex}: {taskStep.objective} ===")
# Use workflow-level intent from planning phase (stored in workflow object)
# This avoids redundant intent analysis - intent was already analyzed during task planning
@@ -74,11 +77,10 @@ class DynamicMode(BaseMode):
self.progressTracker.reset()
# Update workflow object before executing task
- if taskIndex is not None:
- self._updateWorkflowBeforeExecutingTask(taskIndex)
+ self._updateWorkflowBeforeExecutingTask(taskIndex)
- # Create task start message
- await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, totalTasks)
+ # Create task start message (totalTasks not needed - removed from signature)
+ await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, None)
state = TaskExecutionState(taskStep)
# Dynamic mode uses max_steps instead of max_retries
@@ -190,8 +192,8 @@ class DynamicMode(BaseMode):
improvements=[]
)
- # Create task completion message
- await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, totalTasks, completionReviewResult)
+ # Create task completion message (totalTasks not needed - removed from signature)
+ await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, None, completionReviewResult)
return TaskResult(
taskId=taskStep.id,
@@ -222,19 +224,48 @@ class DynamicMode(BaseMode):
response = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
- debugType="actionplan"
+ debugType="dynamic"
)
- jsonStart = response.find('{') if response else -1
- jsonEnd = response.rfind('}') + 1 if response else 0
- if jsonStart == -1 or jsonEnd == 0:
- raise ValueError("No JSON in selection response")
- selection = json.loads(response[jsonStart:jsonEnd])
+
+ # Parse response using structured parsing with ActionDefinition model
+ from modules.shared.jsonUtils import parseJsonWithModel
+ from modules.datamodels.datamodelWorkflow import ActionDefinition
+
+ try:
+ # Parse response string as ActionDefinition
+ actionDef = parseJsonWithModel(response, ActionDefinition)
+ # Convert to dict for compatibility with existing code
+ selection = actionDef.model_dump()
+ except ValueError as e:
+ logger.error(f"Failed to parse ActionDefinition from response: {e}")
+ raise ValueError(f"Invalid action selection response: {e}")
+
if 'action' not in selection or not isinstance(selection['action'], str):
raise ValueError("Selection missing 'action' as string")
# Validate document references - prevent AI from inventing Message IDs
+ # Convert string references to typed DocumentReferenceList
if 'requiredInputDocuments' in selection:
- self._validateDocumentReferences(selection['requiredInputDocuments'], context)
+ stringRefs = selection['requiredInputDocuments']
+ if isinstance(stringRefs, list):
+ # Validate string references first
+ self._validateDocumentReferences(stringRefs, context)
+ # Convert to typed DocumentReferenceList
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ selection['documentList'] = DocumentReferenceList.from_string_list(stringRefs)
+ # Remove old field
+ del selection['requiredInputDocuments']
+ elif stringRefs:
+ # Single string reference
+ self._validateDocumentReferences([stringRefs], context)
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ selection['documentList'] = DocumentReferenceList.from_string_list([stringRefs])
+ del selection['requiredInputDocuments']
+
+ # Convert connection reference if present
+ if 'requiredConnection' in selection:
+ selection['connectionReference'] = selection.get('requiredConnection')
+ del selection['requiredConnection']
# Enforce spec: Stage 1 must NOT include 'parameters'
if 'parameters' in selection:
@@ -294,26 +325,27 @@ class DynamicMode(BaseMode):
# Always request parameters in Stage 2 (spec: Stage 1 must not provide them)
logger.info("Requesting parameters in Stage 2 based on Stage 1 outputs")
- # Create a permissive Stage 2 context to avoid TaskContext attribute restrictions
- from types import SimpleNamespace
- stage2Context = SimpleNamespace()
-
- # Copy essential fields from original context for fallbacks
- stage2Context.taskStep = getattr(context, 'taskStep', None)
- stage2Context.workflowId = getattr(context, 'workflowId', None)
-
- # Set Stage 1 data directly on the permissive context (snake_case for promptGenerationActionsDynamic compatibility)
+ # Update context from Stage 1 selection (replaces SimpleNamespace workaround)
+ # Convert dict selection to ActionDefinition if needed
+ from modules.datamodels.datamodelWorkflow import ActionDefinition
if isinstance(selection, dict):
- stage2Context.action_objective = selection.get('actionObjective', '')
- stage2Context.parameters_context = selection.get('parametersContext', '')
- stage2Context.learnings = selection.get('learnings', [])
+ # Create ActionDefinition from dict for updateFromSelection
+ actionDef = ActionDefinition(
+ action=selection.get('action', ''),
+ actionObjective=selection.get('actionObjective', ''),
+ parametersContext=selection.get('parametersContext', ''),
+ learnings=selection.get('learnings', [])
+ )
+ context.updateFromSelection(actionDef)
+ elif isinstance(selection, ActionDefinition):
+ context.updateFromSelection(selection)
else:
- stage2Context.action_objective = ''
- stage2Context.parameters_context = ''
- stage2Context.learnings = []
+ # Fallback: create empty ActionDefinition
+ context.updateFromSelection(ActionDefinition(action='', actionObjective=''))
# Build and send the Stage 2 parameters prompt (always)
- bundle = generateDynamicParametersPrompt(self.services, stage2Context, compoundActionName, self.adaptiveLearningEngine)
+ # Use context directly (no SimpleNamespace workaround)
+ bundle = generateDynamicParametersPrompt(self.services, context, compoundActionName, self.adaptiveLearningEngine)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
@@ -334,51 +366,56 @@ class DynamicMode(BaseMode):
placeholders=placeholders,
debugType="paramplan"
)
- # Parse JSON response
- js = paramsResp[paramsResp.find('{'):paramsResp.rfind('}')+1] if paramsResp else '{}'
+
+ # Parse JSON response using structured parsing with ActionDefinition model
+ from modules.shared.jsonUtils import parseJsonWithModel
+ from modules.datamodels.datamodelWorkflow import ActionDefinition
+
try:
- paramObj = json.loads(js)
- parameters = paramObj.get('parameters', {}) if isinstance(paramObj, dict) else {}
- except Exception as e:
- logger.error(f"Failed to parse AI parameters response as JSON: {str(e)}")
- logger.error(f"Response was: {paramsResp}")
- raise ValueError("AI parameters response invalid JSON")
+ # Parse response string as ActionDefinition (Stage 2 adds parameters)
+ actionDef = parseJsonWithModel(paramsResp, ActionDefinition)
+ # Extract parameters from parsed model
+ parameters = actionDef.parameters if actionDef.parameters else {}
+ except ValueError as e:
+ logger.error(f"Failed to parse ActionDefinition from parameters response: {e}")
+ logger.error(f"Response was: {paramsResp[:500]}...")
+ raise ValueError(f"AI parameters response invalid: {e}")
+
if not isinstance(parameters, dict):
raise ValueError("AI parameters response missing 'parameters' object")
# Merge Stage 1 resource selections into Stage 2 parameters (only if action expects them)
try:
- requiredDocs = selection.get('requiredInputDocuments')
- if requiredDocs:
- # Ensure list
- if isinstance(requiredDocs, list):
- # Only attach if target action defines 'documentList'
- methodName, actionName = compoundActionName.split('.', 1)
- from modules.workflows.processing.shared.methodDiscovery import getActionParameterList, methods as _methods
- expectedParams = getActionParameterList(methodName, actionName, _methods)
- if 'documentList' in expectedParams:
- parameters['documentList'] = requiredDocs
- requiredConn = selection.get('requiredConnection')
- if requiredConn:
+ # Use typed documentList from selection (required)
+ from modules.datamodels.datamodelDocref import DocumentReferenceList
+ docList = selection.get('documentList')
+
+ if docList and isinstance(docList, DocumentReferenceList):
+ # Only attach if target action defines 'documentList'
+ methodName, actionName = compoundActionName.split('.', 1)
+ from modules.workflows.processing.shared.methodDiscovery import getActionParameterList, methods as _methods
+ expectedParams = getActionParameterList(methodName, actionName, _methods)
+ if 'documentList' in expectedParams:
+ # Pass DocumentReferenceList directly
+ parameters['documentList'] = docList
+
+ # Use connectionReference from selection (required)
+ connectionRef = selection.get('connectionReference')
+ if connectionRef:
# Only attach if target action defines 'connectionReference'
methodName, actionName = compoundActionName.split('.', 1)
from modules.workflows.processing.shared.methodDiscovery import getActionParameterList, methods as _methods
expectedParams = getActionParameterList(methodName, actionName, _methods)
if 'connectionReference' in expectedParams:
- parameters['connectionReference'] = requiredConn
- except Exception:
+ parameters['connectionReference'] = connectionRef
+ except Exception as e:
+ logger.warning(f"Error merging Stage 1 resources into Stage 2 parameters: {e}")
pass
# Apply minimal defaults in-code (language)
if 'language' not in parameters and hasattr(self.services, 'user') and getattr(self.services.user, 'language', None):
parameters['language'] = self.services.user.language
- # Build merged parameters object
- mergedParamObj = {
- "schema": (paramObj.get('schema') if isinstance(paramObj, dict) else 'parameters_v1'),
- "parameters": parameters
- }
-
# Build a synthetic ActionItem for execution routing and labels
currentRound = getattr(self.services.workflow, 'currentRound', 0)
currentTask = getattr(self.services.workflow, 'currentTask', 0)
@@ -393,7 +430,7 @@ class DynamicMode(BaseMode):
})
# Execute using existing single action flow (message creation is handled internally)
- result = await self.actionExecutor.executeSingleAction(taskAction, workflow, taskStep, currentTask, stepIndex, 1)
+ result = await self.actionExecutor.executeSingleAction(taskAction, workflow, taskStep)
return result
@@ -668,51 +705,35 @@ class DynamicMode(BaseMode):
debugType="refinement"
)
- # More robust JSON extraction
+ # Parse response using structured parsing with ReviewResult model
+ from modules.shared.jsonUtils import parseJsonWithModel
+ from modules.datamodels.datamodelChat import ReviewResult
+
if not resp:
return ReviewResult(
status="continue",
reason="default",
qualityScore=5.0
)
- else:
- # Find JSON boundaries more safely
- start_idx = resp.find('{')
- end_idx = resp.rfind('}')
+
+ try:
+ # Parse response string as ReviewResult
+ decision = parseJsonWithModel(resp, ReviewResult)
- if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
- js = resp[start_idx:end_idx+1]
- else:
- js = '{}'
+ # Map "stop" decision to "success" status for ReviewResult
+ if hasattr(decision, 'decision') and decision.decision == 'stop':
+ decision.status = 'success'
+ elif not hasattr(decision, 'status') or not decision.status:
+ decision.status = 'continue'
- try:
- decision = json.loads(js)
- # Ensure decision is a dictionary
- if not isinstance(decision, dict):
- return ReviewResult(
- status="continue",
- reason="default",
- qualityScore=5.0
- )
-
- # Convert decision dict to ReviewResult model
- decisionValue = decision.get('decision', 'continue')
- # Map "stop" to "success" for ReviewResult status
- status = 'success' if decisionValue == 'stop' else 'continue'
- return ReviewResult(
- status=status,
- reason=decision.get('reason', 'No reason provided'),
- qualityScore=float(decision.get('quality_score', decision.get('qualityScore', 5.0))),
- confidence=float(decision.get('confidence', 0.5)),
- userMessage=decision.get('userMessage', None)
- )
- except Exception as e:
- logger.warning(f"Failed to parse refinement decision JSON: {e}")
- return ReviewResult(
- status="continue",
- reason="default",
- qualityScore=5.0
- )
+ return decision
+ except ValueError as e:
+ logger.warning(f"Failed to parse ReviewResult from response: {e}. Using default.")
+ return ReviewResult(
+ status="continue",
+ reason="default",
+ qualityScore=5.0
+ )
async def _createDynamicActionMessage(self, workflow: ChatWorkflow, selection: Dict[str, Any],
step: int, maxSteps: int, taskIndex: int, messageType: str,
diff --git a/modules/workflows/processing/shared/placeholderFactory.py b/modules/workflows/processing/shared/placeholderFactory.py
index 1a21aced..4f3f6ad8 100644
--- a/modules/workflows/processing/shared/placeholderFactory.py
+++ b/modules/workflows/processing/shared/placeholderFactory.py
@@ -8,19 +8,19 @@ NAMING CONVENTION:
- Placeholder names are in UPPER_CASE with underscores
- Function names are in camelCase
-MAPPING TABLE (keys → function) with usage [taskplan | actionplan | dynamic]:
-{{KEY:USER_PROMPT}} -> extractUserPrompt() [taskplan, actionplan, dynamic]
+MAPPING TABLE (keys → function) with usage [taskplan | dynamic]:
+{{KEY:USER_PROMPT}} -> extractUserPrompt() [taskplan, dynamic]
{{KEY:OVERALL_TASK_CONTEXT}} -> extractOverallTaskContext() [dynamic]
{{KEY:TASK_OBJECTIVE}} -> extractTaskObjective() [dynamic]
-{{KEY:USER_LANGUAGE}} -> extractUserLanguage() [actionplan, dynamic]
+{{KEY:USER_LANGUAGE}} -> extractUserLanguage() [dynamic]
{{KEY:LANGUAGE_USER_DETECTED}} -> extractLanguageUserDetected() [taskplan]
-{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory() [taskplan, actionplan, dynamic]
-{{KEY:AVAILABLE_CONNECTIONS_INDEX}} -> extractAvailableConnectionsIndex() [actionplan, dynamic]
+{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory() [taskplan, dynamic]
+{{KEY:AVAILABLE_CONNECTIONS_INDEX}} -> extractAvailableConnectionsIndex() [dynamic]
{{KEY:AVAILABLE_CONNECTIONS_SUMMARY}} -> extractAvailableConnectionsSummary() []
-{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}} -> extractAvailableDocumentsSummary() [taskplan, actionplan, dynamic]
+{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}} -> extractAvailableDocumentsSummary() [taskplan, dynamic]
{{KEY:AVAILABLE_DOCUMENTS_INDEX}} -> extractAvailableDocumentsIndex() [dynamic]
-{{KEY:AVAILABLE_METHODS}} -> extractAvailableMethods() [actionplan, dynamic]
-{{KEY:REVIEW_CONTENT}} -> extractReviewContent() [actionplan, dynamic]
+{{KEY:AVAILABLE_METHODS}} -> extractAvailableMethods() [dynamic]
+{{KEY:REVIEW_CONTENT}} -> extractReviewContent() [dynamic]
{{KEY:PREVIOUS_ACTION_RESULTS}} -> extractPreviousActionResults() [dynamic]
{{KEY:LEARNINGS_AND_IMPROVEMENTS}} -> extractLearningsAndImprovements() [dynamic]
{{KEY:LATEST_REFINEMENT_FEEDBACK}} -> extractLatestRefinementFeedback() [dynamic]
diff --git a/modules/workflows/processing/shared/promptGenerationActionsActionplan.py b/modules/workflows/processing/shared/promptGenerationActionsActionplan.py
deleted file mode 100644
index 002169e0..00000000
--- a/modules/workflows/processing/shared/promptGenerationActionsActionplan.py
+++ /dev/null
@@ -1,234 +0,0 @@
-"""
-Actionplan Mode Prompt Generation
-Handles prompt templates and extraction functions for actionplan mode action handling.
-"""
-
-import logging
-from typing import Dict, Any, List
-from modules.datamodels.datamodelChat import PromptBundle, PromptPlaceholder
-from modules.workflows.processing.shared.placeholderFactory import (
- extractUserPrompt,
- extractAvailableDocumentsSummary,
- extractWorkflowHistory,
- extractAvailableMethods,
- extractUserLanguage,
- extractAvailableConnectionsIndex,
- extractReviewContent,
-)
-
-logger = logging.getLogger(__name__)
-
-def generateActionDefinitionPrompt(services, context: Any) -> PromptBundle:
- """Define placeholders first, then the template; return PromptBundle."""
- placeholders: List[PromptPlaceholder] = [
- PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
- PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
- PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
- PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services), summaryAllowed=True),
- PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
- PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
- ]
-
- template = """# Action Definition
-
-Generate the next action to advance toward completing the task objective.
-
-## 📋 Context
-
-### User Language
-{{KEY:USER_LANGUAGE}}
-
-### Task Objective
-{{KEY:USER_PROMPT}}
-
-### Available Documents
-{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
-
-### Available Connections
-{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
-
-### Workflow History
-{{KEY:WORKFLOW_HISTORY}}
-
-### Available Methods
-{{KEY:AVAILABLE_METHODS}}
-
-## ⚠️ RULES
-
-### Action Names
-- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
-- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
-- **DO NOT separate** method and action names - use the full compound name
-
-### Parameter Guidelines
-- **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
-- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
-- **Include user language** if relevant
-- **Avoid unnecessary fields** - host applies defaults
-
-## 📊 Required JSON Structure
-
-```json
-{
- "actions": [
- {
- "action": "method.action_name",
- "parameters": {},
- "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
- "description": "What this action accomplishes",
- "userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
- }
- ]
-}
-```
-
-## ✅ Correct Example
-
-```json
-{
- "actions": [
- {
- "action": "document.extract",
- "parameters": {"documentList": ["docList:msg_123:results"]},
- "resultLabel": "round1_task1_action1_extract_results",
- "description": "Extract data from documents",
- "userMessage": "Extracting data from documents"
- }
- ]
-}
-```
-
-
-## 🎯 Action Planning Guidelines
-
-### Method Selection
-- **Choose appropriate method** based on task requirements
-- **Consider available resources** (documents, connections)
-- **Match method capabilities** to task objectives
-
-### Parameter Design
-- **Use ACTION SIGNATURE** to understand required parameters
-- **Convert objective** into appropriate parameter values
-- **Include all required parameters** for the action
-
-### Result Labeling
-- **Use descriptive labels** that explain what the action produces
-- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
-- **Make labels meaningful** for future reference
-
-### User Messages
-- **Write in user language:** '{{KEY:USER_LANGUAGE}}'
-- **Explain what's happening** in user-friendly terms
-- **Keep messages concise** but informative
-
-## 🚀 Response Format
-Return ONLY the JSON object with complete action objects. If you cannot complete the full response, set "continuation" to a brief description of what still needs to be generated. If you can complete the response, keep "continuation" as null.
-"""
-
- return PromptBundle(prompt=template, placeholders=placeholders)
-
-def generateResultReviewPrompt(context: Any) -> PromptBundle:
- """Define placeholders first, then the template; return PromptBundle."""
- placeholders: List[PromptPlaceholder] = [
- PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
- PromptPlaceholder(label="REVIEW_CONTENT", content=extractReviewContent(context), summaryAllowed=True),
- ]
-
- template = f"""# Result Review & Validation
-
- Review task execution outcomes and determine success, retry needs, or failure.
-
- ## 📋 Context
-
- ### Task Objective
- {{KEY:USER_PROMPT}}
-
- ### Execution Results
- {{KEY:REVIEW_CONTENT}}
-
- ## 🔍 Validation Criteria
-
- ### Action Assessment
- - **Review each action's success/failure status**
- - **Check if required documents were produced**
- - **Validate document quality and completeness**
- - **Assess if success criteria were met**
- - **Identify any missing or incomplete outputs**
-
- ### Decision Making
- - **Determine if retry would help** or if task should be marked as failed
- - **Consider business value** and user satisfaction
- - **Evaluate technical execution** and results quality
-
- ## 📊 Required JSON Structure
-
- ```json
- {{
- "status": "success|retry|failed",
- "reason": "Detailed explanation of the validation decision",
- "improvements": ["specific improvement 1", "specific improvement 2"],
- "quality_score": 8,
- "met_criteria": ["criteria1", "criteria2"],
- "unmet_criteria": ["criteria3", "criteria4"],
- "confidence": 0.85,
- "userMessage": "User-friendly message explaining the validation result in language '{{KEY:USER_LANGUAGE}}'"
- }}
- ```
-
- ## 🎯 Validation Principles
-
- ### Assessment Approach
- - **Be thorough but fair** in assessment
- - **Focus on business value** and outcomes
- - **Consider both technical execution** and business results
- - **Provide specific, actionable** improvement suggestions
-
- ### Quality Scoring
- - **Use quality scores** to track progress across retries
- - **Scale 1-10**: 1 = Poor, 5 = Average, 10 = Excellent
- - **Consider completeness, accuracy, and usefulness**
-
- ### Criteria Evaluation
- - **Clearly identify** which success criteria were met vs. unmet
- - **List specific criteria** that were achieved
- - **Note missing requirements** that need attention
-
- ### Confidence Levels
- - **Set appropriate confidence levels** based on evidence quality
- - **Scale 0.0-1.0**: 0.0 = No confidence, 1.0 = Complete confidence
- - **Consider data quality** and result reliability
-
- ## 📝 Status Definitions
-
- ### Success
- - **All objectives met** - User got what they asked for
- - **Quality standards met** - Results are complete and accurate
- - **No retry needed** - Task is fully complete
-
- ### Retry
- - **Partial success** - Some but not all objectives met
- - **Improvement possible** - Retry could lead to better results
- - **Technical issues** - Action failures that can be resolved
-
- ### Failed
- - **No progress made** - Objectives not achieved
- - **Technical limitations** - Cannot be resolved with retry
- - **Resource constraints** - Missing required inputs
-
- ## 💡 Improvement Suggestions
-
- ### Actionable Improvements
- - **Be specific** - Don't just say "improve quality"
- - **Focus on process** - How to do better next time
- - **Consider resources** - What additional inputs might help
- - **Technical fixes** - Address specific technical issues
-
- ### Examples
- - "Use more specific document references from AVAILABLE_DOCUMENTS_INDEX"
- - "Include user language parameter for better localization"
- - "Break down complex objective into smaller, focused actions"
- - "Verify document references before processing"
-"""
-
- return PromptBundle(prompt=template, placeholders=placeholders)
-
diff --git a/modules/workflows/processing/shared/promptGenerationActionsDynamic.py b/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
index 794f4175..6dfd9090 100644
--- a/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
+++ b/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
@@ -174,15 +174,16 @@ Excludes documents/connections/history entirely.
actionParametersText = _formatBusinessParameters(actionParameterList)
# determine action objective if available, else fall back to user prompt
- if hasattr(context, 'action_objective') and context.action_objective:
- actionObjective = context.action_objective
+ if hasattr(context, 'actionObjective') and context.actionObjective:
+ actionObjective = context.actionObjective
elif hasattr(context, 'taskStep') and context.taskStep and getattr(context.taskStep, 'objective', None):
actionObjective = context.taskStep.objective
else:
actionObjective = extractUserPrompt(context)
# Minimal Stage 2 (no fallback)
- parametersContext = getattr(context, 'parameters_context', None)
+ parametersContext = getattr(context, 'parametersContext', None)
+
learningsText = ""
try:
# If Stage 1 learnings were attached to context, pass them textually
diff --git a/modules/workflows/processing/workflowProcessor.py b/modules/workflows/processing/workflowProcessor.py
index 88bb25fd..df502ffa 100644
--- a/modules/workflows/processing/workflowProcessor.py
+++ b/modules/workflows/processing/workflowProcessor.py
@@ -6,7 +6,6 @@ from typing import Dict, Any, Optional, List
from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskPlan, TaskResult
from modules.datamodels.datamodelChat import ChatWorkflow, WorkflowModeEnum
from modules.workflows.processing.modes.modeBase import BaseMode
-from modules.workflows.processing.modes.modeActionplan import ActionplanMode
from modules.workflows.processing.modes.modeDynamic import DynamicMode
from modules.workflows.processing.modes.modeAutomation import AutomationMode
from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
@@ -24,8 +23,6 @@ class WorkflowProcessor:
"""Create the appropriate mode implementation based on workflow mode"""
if workflowMode == WorkflowModeEnum.WORKFLOW_DYNAMIC:
return DynamicMode(self.services)
- elif workflowMode == WorkflowModeEnum.WORKFLOW_ACTIONPLAN:
- return ActionplanMode(self.services)
elif workflowMode == WorkflowModeEnum.WORKFLOW_AUTOMATION:
return AutomationMode(self.services)
else:
@@ -81,11 +78,13 @@ class WorkflowProcessor:
self.services.chat.progressLogFinish(operationId, False)
raise
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
- taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
"""Execute a task step using the appropriate mode"""
import time
+ # Get task index from workflow state
+ taskIndex = workflow.getTaskIndex()
+
# Init progress logger
operationId = f"taskExec_{workflow.id}_{taskIndex}_{int(time.time())}"
@@ -98,7 +97,7 @@ class WorkflowProcessor:
operationId,
"Workflow Execution",
"Task Execution",
- f"Task {taskIndex}/{totalTasks}"
+ f"Task {taskIndex}"
)
logger.info(f"=== STARTING TASK EXECUTION ===")
@@ -110,7 +109,7 @@ class WorkflowProcessor:
self.services.chat.progressLogUpdate(operationId, 0.2, "Executing")
# Delegate to the appropriate mode
- result = await self.mode.executeTask(taskStep, workflow, context, taskIndex, totalTasks)
+ result = await self.mode.executeTask(taskStep, workflow, context)
# Complete progress tracking
self.services.chat.progressLogFinish(operationId, True)
diff --git a/pytest.ini b/pytest.ini
index e3d8c35e..ae59338f 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,6 +1,6 @@
[pytest]
testpaths = tests
-python_paths = .
+pythonpath = .
python_files = test_*.py
python_classes = Test*
python_functions = test_*
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 00000000..02c8ae04
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,228 @@
+# Test Suite Documentation
+
+## Overview
+
+This test suite includes:
+- **Unit Tests**: Fast, isolated tests for individual components
+- **Integration Tests**: Tests for component interactions
+- **Validation Tests**: End-to-end architecture validation
+- **Functional Tests**: Standalone async test scripts for real-world scenarios
+
+## Running Tests
+
+### Prerequisites
+
+```bash
+# Install dependencies (pytest is already in requirements.txt)
+cd gateway
+pip install -r requirements.txt
+
+# Or install pytest separately if needed
+pip install pytest pytest-asyncio pytest-cov
+```
+
+### Running Pytest Tests
+
+**All tests:**
+```bash
+cd gateway
+pytest
+```
+
+**By category:**
+```bash
+# Unit tests only
+pytest tests/unit/
+
+# Integration tests only
+pytest tests/integration/
+
+# Validation tests only
+pytest tests/validation/
+```
+
+**Specific test:**
+```bash
+# Specific file
+pytest tests/unit/datamodels/test_workflow_models.py
+
+# Specific test class
+pytest tests/unit/datamodels/test_workflow_models.py::TestActionDefinition
+
+# Specific test function
+pytest tests/unit/datamodels/test_workflow_models.py::TestActionDefinition::test_actionDefinition_needsStage2_without_parameters
+```
+
+**With options:**
+```bash
+# Verbose output
+pytest -v
+
+# Show print statements
+pytest -s
+
+# Stop on first failure
+pytest -x
+
+# Run tests matching pattern
+pytest -k "test_actionDefinition"
+
+# Run with coverage
+pytest --cov=modules --cov-report=html
+```
+
+### Running Functional Tests
+
+These are standalone async scripts that test real AI operations. They are **NOT pytest-compatible** and must be run directly:
+
+```bash
+cd gateway
+
+# AI Models Test (all supported operation types)
+python tests/functional/test02_ai_models.py
+
+# AI Model Selection Test
+python tests/functional/test01_ai_model_selection.py
+
+# AI Behavior Test
+python tests/functional/test_ai_behavior.py
+
+# AI Operations Test
+python tests/functional/test_ai_operations.py
+```
+
+**Note:** These functional tests require:
+- Valid API keys configured in environment/config
+- Database access
+- May make actual AI API calls (costs may apply)
+- Must be run directly (not via pytest)
+
+## Test Structure
+
+```
+tests/
+├── unit/ # Unit tests (fast, isolated, pytest-compatible)
+│ ├── datamodels/ # Data model tests
+│ ├── services/ # Service layer tests
+│ ├── workflows/ # Workflow tests
+│ └── utils/ # Utility function tests
+├── integration/ # Integration tests (pytest-compatible)
+│ └── workflows/ # Workflow integration tests
+├── validation/ # Architecture validation tests (pytest-compatible)
+└── functional/ # Functional tests (standalone scripts, NOT pytest-compatible)
+    ├── test01_ai_model_selection.py
+    ├── test02_ai_models.py
+    ├── test_ai_behavior.py
+    └── test_ai_operations.py
+```
+
+## Test Categories
+
+### Unit Tests (`tests/unit/`)
+
+**Data Models:**
+- `test_workflow_models.py` - ActionDefinition, AiResponse, etc.
+- `test_docref.py` - DocumentReference models
+
+**Services:**
+- `test_ai_service.py` - AI service methods (mocked)
+
+**Workflows:**
+- `test_state_management.py` - ChatWorkflow state management
+
+**Utils:**
+- `test_json_utils.py` - JSON parsing utilities
+
+### Integration Tests (`tests/integration/`)
+
+- `test_workflow_execution.py` - Full workflow execution flows
+
+### Validation Tests (`tests/validation/`)
+
+- `test_architecture_validation.py` - End-to-end architecture validation
+
+### Functional Tests (`tests/functional/`)
+
+**Note:** These are standalone scripts that must be run directly (not via pytest):
+
+- `test02_ai_models.py` - Real AI model testing (all supported operation types)
+- `test01_ai_model_selection.py` - Model selection logic
+- `test_ai_behavior.py` - AI behavior with different prompts
+- `test_ai_operations.py` - AI operations testing
+
+## Pytest Configuration
+
+Configuration is in `pytest.ini`:
+- Default: Runs non-expensive tests only
+- Use `pytest -m ""` to run ALL tests (including expensive ones)
+- Test paths: `tests/`
+- Python paths: `.` (gateway directory)
+
+## Markers
+
+Tests can be marked with pytest markers:
+
+```python
+@pytest.mark.asyncio
+async def test_something():
+ ...
+
+@pytest.mark.expensive
+def test_expensive_operation():
+ ...
+```
+
+Run only expensive tests:
+```bash
+pytest -m expensive
+```
+
+## Debugging Tests
+
+**Run with debugger:**
+```bash
+pytest --pdb # Drop into debugger on failure
+```
+
+**Show local variables:**
+```bash
+pytest -l # Show local variables in traceback
+```
+
+**Run last failed tests:**
+```bash
+pytest --lf
+```
+
+## Continuous Integration
+
+For CI/CD, use:
+```bash
+# Run all tests with coverage
+pytest --cov=modules --cov-report=xml --cov-report=html
+
+# Run only fast tests (exclude expensive)
+pytest -m "not expensive"
+```
+
+## Troubleshooting
+
+**Import errors (`ModuleNotFoundError: No module named 'modules'`):**
+- Ensure you're running pytest from the `gateway/` directory
+- The `conftest.py` file automatically adds the gateway directory to `sys.path`
+- If issues persist, verify `pytest.ini` has `pythonpath = .` (not `python_paths`)
+- You can also set PYTHONPATH manually:
+ ```powershell
+ $env:PYTHONPATH = "."
+ pytest
+ ```
+
+**Async test issues:**
+- Ensure `pytest-asyncio` is installed
+- Tests marked with `@pytest.mark.asyncio` will run correctly
+
+**Path issues:**
+- Standalone scripts automatically add gateway to `sys.path`
+- Pytest tests use `conftest.py` to set up the path automatically
+- If running from a different directory, use: `python -m pytest` from the gateway directory
+
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..9cfa3406
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,4 @@
+"""
+Test suite for PowerOn gateway modules
+"""
+
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..ab22ee17
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,14 @@
+"""
+Pytest configuration file for test suite.
+Ensures proper Python path setup for importing modules.
+"""
+
+import sys
+import os
+from pathlib import Path
+
+# Add gateway directory to Python path
+gateway_dir = Path(__file__).parent.parent
+if str(gateway_dir) not in sys.path:
+ sys.path.insert(0, str(gateway_dir))
+
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 00000000..f02847fc
--- /dev/null
+++ b/tests/functional/__init__.py
@@ -0,0 +1,10 @@
+"""
+Functional tests directory.
+
+These tests are not pytest-compatible and must be run directly:
+    python tests/functional/test01_ai_model_selection.py
+    python tests/functional/test02_ai_models.py
+    python tests/functional/test_ai_behavior.py
+    python tests/functional/test_ai_operations.py
+"""
+
diff --git a/test2_ai_model_selection.py b/tests/functional/test01_ai_model_selection.py
similarity index 98%
rename from test2_ai_model_selection.py
rename to tests/functional/test01_ai_model_selection.py
index 62324cba..a9e2ae29 100644
--- a/test2_ai_model_selection.py
+++ b/tests/functional/test01_ai_model_selection.py
@@ -12,9 +12,10 @@ import os
import sys
import base64
-
-# Ensure gateway is on path when running directly
-sys.path.append(os.path.dirname(__file__))
+# Add the gateway to path (go up 2 levels from tests/functional/)
+_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+if _gateway_path not in sys.path:
+ sys.path.insert(0, _gateway_path)
from modules.features.chatPlayground.mainChatPlayground import getServices
from modules.datamodels.datamodelAi import (
@@ -249,7 +250,7 @@ class ModelSelectionTester:
print(f"{'='*80}")
options = AiCallOptions(
- operationType=OperationTypeEnum.WEB_RESEARCH,
+ operationType=OperationTypeEnum.WEB_SEARCH,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.05,
@@ -324,7 +325,7 @@ class ModelSelectionTester:
# This method uses webQuery internally, so it uses the same model selection as web research
options = AiCallOptions(
- operationType=OperationTypeEnum.WEB_RESEARCH,
+ operationType=OperationTypeEnum.WEB_SEARCH,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.03,
@@ -433,7 +434,7 @@ class ModelSelectionTester:
print("\n Testing: aiObjects.webQuery() - Web Research")
try:
options = AiCallOptions(
- operationType=OperationTypeEnum.WEB_RESEARCH,
+ operationType=OperationTypeEnum.WEB_SEARCH,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.05,
@@ -500,4 +501,3 @@ async def main() -> None:
if __name__ == "__main__":
asyncio.run(main())
-
diff --git a/test1_ai_models.py b/tests/functional/test02_ai_models.py
similarity index 62%
rename from test1_ai_models.py
rename to tests/functional/test02_ai_models.py
index 97d49efb..2f3081ae 100644
--- a/test1_ai_models.py
+++ b/tests/functional/test02_ai_models.py
@@ -1,23 +1,19 @@
#!/usr/bin/env python3
"""
-AI Models Test - Tests IMAGE_GENERATE functionality on all models that support it
+AI Models Test - Tests ALL operation types on ALL models that support them
-This script tests all models that have IMAGE_GENERATE capability, validates that
-they can generate images from text prompts, and analyzes the quality of results.
+This script tests all available models with all their supported operation types:
+- PLAN: Planning operations
+- DATA_ANALYSE: Data analysis
+- DATA_GENERATE: Data generation
+- DATA_EXTRACT: Data extraction
+- IMAGE_ANALYSE: Image analysis
+- IMAGE_GENERATE: Image generation
+- WEB_SEARCH: Web search
+- WEB_CRAWL: Web crawling
-CODE FLOW ANALYSIS:
-
-1. methodAi.generateImage() is called with prompt and optional size/quality/style
-2. mainServiceAi.generateImage() is called
- -> delegates to subCoreAi.generateImage()
- -> which calls aiObjects.generateImage()
- -> which creates AiModelCall and calls model.functionCall()
-
-WHERE FUNCTIONS ARE USED:
-- mainServiceAi.generateImage(): Public API entry point for image generation
-- subCoreAi.generateImage(): Internal implementation, called by mainServiceAi
-- aiObjects.generateImage(): Creates standardized call and invokes model
-- model.functionCall(): Direct model plugin call (e.g., DALL-E 3)
+For each model, it tests every operation type the model supports and validates
+the results. Results are saved to files for analysis.
"""
import asyncio
@@ -28,8 +24,10 @@ import base64
from datetime import datetime
from typing import Dict, Any, List
-# Add the gateway to path
-sys.path.append(os.path.dirname(__file__))
+# Add the gateway to path (go up 2 levels from tests/functional/)
+_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+if _gateway_path not in sys.path:
+ sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.features.chatPlayground.mainChatPlayground import getServices
@@ -52,8 +50,9 @@ class AIModelsTester:
self.services = getServices(testUser, None) # Test user, no workflow
self.testResults = []
- # Create logs directory if it doesn't exist
- self.logsDir = os.path.join(os.path.dirname(__file__), "..", "local", "logs")
+        # Create logs directory if it doesn't exist (sibling of the gateway dir: <gateway parent>/local/logs)
+ _gateway_dir = os.path.dirname(_gateway_path)
+ self.logsDir = os.path.join(_gateway_dir, "local", "logs")
os.makedirs(self.logsDir, exist_ok=True)
# Create modeltest subdirectory
@@ -84,7 +83,7 @@ class AIModelsTester:
self.services.extraction = ExtractionService(self.services)
# Create a minimal workflow context
- from modules.datamodels.datamodelChat import ChatWorkflow
+ from modules.datamodels.datamodelChat import ChatWorkflow, WorkflowModeEnum
import uuid
self.services.currentWorkflow = ChatWorkflow(
@@ -100,62 +99,126 @@ class AIModelsTester:
totalActions=0,
mandateId="test_mandate",
messageIds=[],
- workflowMode="React",
+ workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
maxSteps=5
)
print("✅ AI Service initialized successfully")
print(f"📁 Results will be saved to: {self.modelTestDir}")
- async def testModel(self, modelName: str) -> Dict[str, Any]:
- """Test a specific AI model with IMAGE_GENERATE operation."""
- print(f"\n{'='*60}")
- print(f"TESTING MODEL: {modelName}")
- print(f"OPERATION TYPE: IMAGE_GENERATE")
- print(f"{'='*60}")
+ def _getTestPromptForOperation(self, operationType) -> str:
+ """Get appropriate test prompt for each operation type."""
+ from modules.datamodels.datamodelAi import OperationTypeEnum
- # Test prompt for image generation
- testPrompt = 'Create a creative birthday cake designed to look like a monster truck tire/wheel. The cake appears to be chocolate-flavored and is decorated to resemble a large black tire with treads around the sides. On top of the cake, there is a mound of chocolate cake or brownie material meant to look like dirt or mud, with a toy monster truck positioned on top. The monster truck has large wheels and appears to be reddish in color. There are several small decorative flags in light blue and mint green colors stuck into the "dirt" mound. The words "HAPPY BIRTHDAY" are written in white letters around the side of the tire-shaped cake. The image appears to be from Yandex Images, as indicated by Russian text at the bottom. The status bar at the top shows 13:02 time and 82% battery level.'
- size = "1024x1024"
- quality = "standard"
- style = "vivid"
+ prompts = {
+ OperationTypeEnum.PLAN: "Create a project plan for developing a mobile app with 5 main tasks.",
+ OperationTypeEnum.DATA_ANALYSE: "Analyze the pros and cons of cloud computing.",
+ OperationTypeEnum.DATA_GENERATE: "Generate a list of 10 creative marketing ideas for a tech startup.",
+ OperationTypeEnum.DATA_EXTRACT: "Extract key information from this text about artificial intelligence trends.",
+ OperationTypeEnum.IMAGE_ANALYSE: "Describe what you see in this image.",
+ OperationTypeEnum.IMAGE_GENERATE: "A futuristic cityscape with flying cars and neon lights.",
+ OperationTypeEnum.WEB_SEARCH: "Who works in valueon ag in switzerland?", # Search query for valueon.ch
+ OperationTypeEnum.WEB_CRAWL: "https://www.valueon.ch" # URL to crawl
+ }
+ return prompts.get(operationType, "Test prompt for this operation type.")
+
+ def _createTestImage(self) -> str:
+ """Load test image file and convert to base64 data URL."""
+ import base64
- print(f"Test prompt: {testPrompt}")
- print(f"Size: {size}, Quality: {quality}, Style: {style}")
+ # Path to test image (relative to gateway directory)
+ testImagePath = os.path.join(
+ os.path.dirname(__file__), # tests/functional/
+ "..", # tests/
+ "testdata", # tests/testdata/
+ "Foto20250906_125903.jpg"
+ )
+
+ # Resolve absolute path
+ testImagePath = os.path.abspath(testImagePath)
+
+ if not os.path.exists(testImagePath):
+ raise FileNotFoundError(f"Test image not found at: {testImagePath}")
+
+ # Read image file and convert to base64
+ with open(testImagePath, 'rb') as f:
+ imageBytes = f.read()
+
+ imageBase64 = base64.b64encode(imageBytes).decode('utf-8')
+ return f"data:image/jpeg;base64,{imageBase64}"
+
+ async def testModelOperation(self, modelName: str, operationType, model) -> Dict[str, Any]:
+ """Test a specific AI model with a specific operation type."""
+ print(f"\n Testing operation: {operationType.name}")
+
+ testPrompt = self._getTestPromptForOperation(operationType)
startTime = asyncio.get_event_loop().time()
try:
- # Get model directly from registry and test it
- from modules.aicore.aicoreModelRegistry import modelRegistry
- model = modelRegistry.getModel(modelName)
+ # Create messages - format differs for IMAGE_ANALYSE
+ from modules.datamodels.datamodelAi import OperationTypeEnum
- if not model:
- raise Exception(f"Model {modelName} not found")
-
- # Create messages for image generation (plain text prompt)
- messages = [
- {
+ if operationType == OperationTypeEnum.IMAGE_ANALYSE:
+ # For image analysis, content must be a list with text and image
+ testImage = self._createTestImage()
+ messages = [{
"role": "user",
- "content": testPrompt
- }
- ]
+ "content": [
+ {"type": "text", "text": testPrompt},
+ {"type": "image_url", "image_url": {"url": testImage}}
+ ]
+ }]
+ else:
+ # For other operations, simple text content
+ messages = [{"role": "user", "content": testPrompt}]
+
+ # Create model call options
+ from modules.datamodels.datamodelAi import (
+ AiModelCall, AiCallOptions, AiCallPromptImage,
+ AiCallPromptWebSearch, AiCallPromptWebCrawl
+ )
+ import json
+
+ options = AiCallOptions(operationType=operationType)
+
+ # Format message content based on operation type
+ if operationType == OperationTypeEnum.IMAGE_GENERATE:
+ # Create structured prompt with image generation parameters
+ imagePrompt = AiCallPromptImage(
+ prompt=testPrompt,
+ size="1024x1024",
+ quality="standard",
+ style="vivid"
+ )
+ # Update message content to JSON format
+ messages[0]["content"] = json.dumps(imagePrompt.model_dump())
+ elif operationType == OperationTypeEnum.WEB_SEARCH:
+ # Create structured prompt for web search
+ webSearchPrompt = AiCallPromptWebSearch(
+ instruction=testPrompt,
+ maxNumberPages=5 # Limit for testing
+ )
+ # Update message content to JSON format
+ messages[0]["content"] = json.dumps(webSearchPrompt.model_dump())
+ elif operationType == OperationTypeEnum.WEB_CRAWL:
+ # Create structured prompt for web crawl
+ webCrawlPrompt = AiCallPromptWebCrawl(
+ instruction="Extract the main content from this page",
+ url=testPrompt, # testPrompt contains the URL
+ maxDepth=1, # Limit for testing
+ maxWidth=3 # Limit for testing
+ )
+ # Update message content to JSON format
+ messages[0]["content"] = json.dumps(webCrawlPrompt.model_dump())
- # Create model call with image generation parameters
- from modules.datamodels.datamodelAi import AiModelCall, AiCallOptions
modelCall = AiModelCall(
messages=messages,
model=model,
- options=AiCallOptions(
- operationType=OperationTypeEnum.IMAGE_GENERATE,
- size=size,
- quality=quality,
- style=style
- )
+ options=options
)
# Call model directly
- print(f"Calling model.functionCall() for {modelName}")
modelResponse = await model.functionCall(modelCall)
if not modelResponse.success:
@@ -166,65 +229,54 @@ class AIModelsTester:
endTime = asyncio.get_event_loop().time()
processingTime = endTime - startTime
- # Analyze result (base64 image data)
- if result:
- analysisResult = {
- "modelName": modelName,
- "status": "SUCCESS",
- "processingTime": round(processingTime, 2),
- "responseLength": len(result) if result else 0,
- "responseType": "base64_image",
- "hasContent": True,
- "error": None,
- "testPrompt": testPrompt,
- "size": size,
- "quality": quality,
- "style": style,
- "isBase64": result.startswith("data:image") if isinstance(result, str) else False
- }
-
- # Check if result is base64
+ # Analyze result based on operation type
+ analysisResult = {
+ "modelName": modelName,
+ "operationType": operationType.name,
+ "status": "SUCCESS",
+ "processingTime": round(processingTime, 2),
+ "responseLength": len(str(result)) if result else 0,
+ "hasContent": bool(result),
+ "error": None,
+ "testPrompt": testPrompt,
+ "fullResponse": str(result) if result else ""
+ }
+
+ # Operation-specific analysis
+ if operationType == OperationTypeEnum.IMAGE_GENERATE:
+ analysisResult["responseType"] = "base64_image"
import base64
try:
- # If it's a data URL, extract the base64 part
- if result.startswith("data:image"):
+ if isinstance(result, str) and result.startswith("data:image"):
base64Data = result.split(",")[1] if "," in result else result
else:
- base64Data = result
-
- # Try to decode to verify it's valid base64
- imageBytes = base64.b64decode(base64Data)
- analysisResult["isValidBase64"] = True
- analysisResult["imageByteSize"] = len(imageBytes)
+ base64Data = result if isinstance(result, str) else ""
+ if base64Data:
+ imageBytes = base64.b64decode(base64Data)
+ analysisResult["isValidBase64"] = True
+ analysisResult["imageByteSize"] = len(imageBytes)
+ else:
+ analysisResult["isValidBase64"] = False
+ analysisResult["imageByteSize"] = 0
except:
analysisResult["isValidBase64"] = False
analysisResult["imageByteSize"] = 0
-
- analysisResult["responsePreview"] = result[:100] + "..." if len(result) > 100 else result
- analysisResult["fullResponse"] = result
-
- print(f"✅ SUCCESS - Processing time: {processingTime:.2f}s")
- print(f"📄 Response length: {len(result)} characters")
- print(f"🖼️ Valid base64: {analysisResult.get('isValidBase64', False)}")
- if analysisResult.get('imageByteSize'):
- print(f"🖼️ Image size: {analysisResult['imageByteSize']} bytes")
-
- result = analysisResult
-
- # Validate that content was extracted
- if result.get("status") == "SUCCESS" and result.get("fullResponse"):
- self._validateImageResponse(modelName, result)
+ elif operationType in [OperationTypeEnum.DATA_ANALYSE, OperationTypeEnum.DATA_GENERATE, OperationTypeEnum.PLAN]:
+ analysisResult["responseType"] = "text"
+ try:
+ import json
+ json.loads(str(result))
+ analysisResult["isValidJson"] = True
+ except:
+ analysisResult["isValidJson"] = False
else:
- result = {
- "modelName": modelName,
- "status": "ERROR",
- "processingTime": round(processingTime, 2),
- "responseLength": 0,
- "responseType": "error",
- "hasContent": False,
- "error": "Empty response",
- "fullResponse": ""
- }
+ analysisResult["responseType"] = "text"
+
+ analysisResult["responsePreview"] = str(result)[:200] + "..." if len(str(result)) > 200 else str(result)
+
+ print(f" ✅ SUCCESS - Processing time: {processingTime:.2f}s, Response length: {analysisResult['responseLength']} chars")
+
+ return analysisResult
except Exception as e:
endTime = asyncio.get_event_loop().time()
@@ -232,6 +284,7 @@ class AIModelsTester:
result = {
"modelName": modelName,
+ "operationType": operationType.name,
"status": "EXCEPTION",
"processingTime": round(processingTime, 2),
"responseLength": 0,
@@ -239,23 +292,52 @@ class AIModelsTester:
"hasContent": False,
"error": str(e),
"testPrompt": testPrompt,
- "size": size,
- "quality": quality,
- "style": style
+ "fullResponse": ""
}
- print(f"💥 EXCEPTION - {str(e)}")
+ print(f" 💥 EXCEPTION - {str(e)}")
+ return result
+
+ async def testModel(self, modelInfo: Dict[str, Any]) -> List[Dict[str, Any]]:
+ """Test a specific AI model with all its supported operation types."""
+ modelName = modelInfo["displayName"]
+ operationTypes = modelInfo["operationTypes"]
- self.testResults.append(result)
+ print(f"\n{'='*60}")
+ print(f"TESTING MODEL: {modelName}")
+ print(f"Supported operations: {', '.join([op.name for op in operationTypes])}")
+ print(f"{'='*60}")
- # Save text response even for exceptions to log the prompt
- if result.get("status") in ["SUCCESS", "EXCEPTION", "ERROR"]:
- self._saveImageResponse(modelName, result)
+ # Get model from registry
+ from modules.aicore.aicoreModelRegistry import modelRegistry
+ model = modelRegistry.getModel(modelName)
- # Save individual model result immediately
- self._saveIndividualModelResult(modelName, result)
+ if not model:
+ errorResult = {
+ "modelName": modelName,
+ "operationType": "ALL",
+ "status": "ERROR",
+ "processingTime": 0,
+ "responseLength": 0,
+ "responseType": "error",
+ "hasContent": False,
+ "error": f"Model {modelName} not found in registry",
+ "fullResponse": ""
+ }
+ self.testResults.append(errorResult)
+ return [errorResult]
- return result
+ # Test each operation type
+ results = []
+ for operationType in operationTypes:
+ result = await self.testModelOperation(modelName, operationType, model)
+ results.append(result)
+ self.testResults.append(result)
+
+ # Save individual result
+ self._saveIndividualModelResult(f"{modelName}_{operationType.name}", result)
+
+ return results
def _saveImageResponse(self, modelName: str, result: Dict[str, Any]):
"""Save image generation response as image file."""
@@ -607,31 +689,38 @@ Width: {crawlWidth}
except Exception as e:
print(f"❌ Error saving individual result: {str(e)}")
- def getAllAvailableModels(self) -> List[str]:
- """Get all available model names that support IMAGE_GENERATE."""
+ def getAllAvailableModels(self) -> List[Dict[str, Any]]:
+ """Get all available models with their supported operation types."""
from modules.aicore.aicoreModelRegistry import modelRegistry
from modules.datamodels.datamodelAi import OperationTypeEnum
# Get all models from registry
allModels = modelRegistry.getAvailableModels()
+ totalModels = len(allModels)
- # Filter models that support IMAGE_GENERATE
- imageGenerateModels = []
+ print(f"\n📊 Total models in registry: {totalModels}")
+
+ # Collect all models with their supported operation types
+ modelsToTest = []
for model in allModels:
- if model.operationTypes and any(
- ot.operationType == OperationTypeEnum.IMAGE_GENERATE
- for ot in model.operationTypes
- ):
- imageGenerateModels.append(model.name)
+ if model.operationTypes and len(model.operationTypes) > 0:
+ supportedOps = [ot.operationType for ot in model.operationTypes]
+ modelsToTest.append({
+ "displayName": model.displayName,
+ "name": model.name,
+ "operationTypes": supportedOps
+ })
- # Filter to common models for testing (remove filter to test all models)
- # imageGenerateModels = [m for m in imageGenerateModels if "dall-e" in m.lower()]
+ print(f"✅ Found {len(modelsToTest)} model(s) with operation type support (will test all):")
+ for i, modelInfo in enumerate(modelsToTest, 1):
+ opsStr = ", ".join([op.name for op in modelInfo["operationTypes"]])
+ print(f" {i}. {modelInfo['displayName']} - Operations: {opsStr}")
- print(f"Found {len(imageGenerateModels)} models that support IMAGE_GENERATE:")
- for modelName in imageGenerateModels:
- print(f" - {modelName}")
+ if len(modelsToTest) < totalModels:
+ skipped = totalModels - len(modelsToTest)
+ print(f"ℹ️ {skipped} model(s) have no operation types and will be skipped.")
- return imageGenerateModels
+ return modelsToTest
def saveTestResults(self):
"""Save detailed test results to file."""
@@ -668,54 +757,65 @@ Width: {crawlWidth}
print("AI MODELS TEST SUMMARY")
print(f"{'='*80}")
- totalModels = len(self.testResults)
- successfulModels = len([r for r in self.testResults if r["status"] == "SUCCESS"])
- errorModels = len([r for r in self.testResults if r["status"] == "ERROR"])
- exceptionModels = len([r for r in self.testResults if r["status"] == "EXCEPTION"])
+ totalTests = len(self.testResults)
+ successfulTests = len([r for r in self.testResults if r["status"] == "SUCCESS"])
+ errorTests = len([r for r in self.testResults if r["status"] == "ERROR"])
+ exceptionTests = len([r for r in self.testResults if r["status"] == "EXCEPTION"])
- print(f"📊 Total models tested: {totalModels}")
- print(f"✅ Successful: {successfulModels}")
- print(f"❌ Errors: {errorModels}")
- print(f"💥 Exceptions: {exceptionModels}")
- print(f"📈 Success rate: {(successfulModels/totalModels*100):.1f}%" if totalModels > 0 else "0%")
+ # Count unique models
+ uniqueModels = len(set(r["modelName"] for r in self.testResults))
+
+ print(f"📊 Total tests executed: {totalTests}")
+ print(f"📦 Unique models tested: {uniqueModels}")
+ print(f"✅ Successful tests: {successfulTests}")
+ print(f"❌ Error tests: {errorTests}")
+ print(f"💥 Exception tests: {exceptionTests}")
+ print(f"📈 Success rate: {(successfulTests/totalTests*100):.1f}%" if totalTests > 0 else "0%")
print(f"\n{'='*80}")
print("DETAILED RESULTS")
print(f"{'='*80}")
+ # Group results by model
+ from collections import defaultdict
+ resultsByModel = defaultdict(list)
for result in self.testResults:
- status_icon = {
- "SUCCESS": "✅",
- "ERROR": "❌",
- "EXCEPTION": "💥"
- }.get(result["status"], "❓")
-
- print(f"\n{status_icon} {result['modelName']}")
- print(f" Status: {result['status']}")
- print(f" Processing time: {result['processingTime']}s")
- print(f" Response length: {result['responseLength']} characters")
- print(f" Response type: {result['responseType']}")
-
- if result.get("isValidJson") is not None:
- print(f" Valid JSON: {'Yes' if result['isValidJson'] else 'No'}")
-
- if result.get("crawledUrl"):
- print(f" Crawled URL: {result['crawledUrl']}")
-
- if result.get("contentLength") is not None:
- print(f" Content length: {result['contentLength']} characters")
-
- if result.get("pagesCrawled") is not None:
- print(f" Pages crawled: {result['pagesCrawled']}")
-
- if result["error"]:
- print(f" Error: {result['error']}")
-
- if result.get("responsePreview"):
- print(f" Preview: {result['responsePreview']}")
+ resultsByModel[result['modelName']].append(result)
- # Find fastest and slowest models
- if successfulModels > 0:
+ for modelName, modelResults in resultsByModel.items():
+ print(f"\n📦 {modelName}")
+ for result in modelResults:
+ status_icon = {
+ "SUCCESS": "✅",
+ "ERROR": "❌",
+ "EXCEPTION": "💥"
+ }.get(result["status"], "❓")
+
+ opType = result.get("operationType", "UNKNOWN")
+ print(f" {status_icon} {opType}: {result['status']} - {result['processingTime']}s - {result['responseLength']} chars")
+
+ if result.get("isValidJson") is not None:
+ print(f" Valid JSON: {'Yes' if result['isValidJson'] else 'No'}")
+
+ if result.get("isValidBase64") is not None:
+ print(f" Valid Base64: {'Yes' if result['isValidBase64'] else 'No'}")
+ if result.get("imageByteSize"):
+ print(f" Image size: {result['imageByteSize']} bytes")
+
+ if result.get("crawledUrl"):
+ print(f" Crawled URL: {result['crawledUrl']}")
+
+ if result.get("contentLength") is not None:
+ print(f" Content length: {result['contentLength']} characters")
+
+ if result.get("pagesCrawled") is not None:
+ print(f" Pages crawled: {result['pagesCrawled']}")
+
+ if result.get("error"):
+ print(f" Error: {result['error']}")
+
+ # Find fastest and slowest tests
+ if successfulTests > 0:
successfulResults = [r for r in self.testResults if r["status"] == "SUCCESS"]
fastest = min(successfulResults, key=lambda x: x["processingTime"])
slowest = max(successfulResults, key=lambda x: x["processingTime"])
@@ -723,8 +823,8 @@ Width: {crawlWidth}
print(f"\n{'='*80}")
print("PERFORMANCE HIGHLIGHTS")
print(f"{'='*80}")
- print(f"🚀 Fastest model: {fastest['modelName']} ({fastest['processingTime']}s)")
- print(f"🐌 Slowest model: {slowest['modelName']} ({slowest['processingTime']}s)")
+ print(f"🚀 Fastest test: {fastest['modelName']} - {fastest.get('operationType', 'UNKNOWN')} ({fastest['processingTime']}s)")
+ print(f"🐌 Slowest test: {slowest['modelName']} - {slowest.get('operationType', 'UNKNOWN')} ({slowest['processingTime']}s)")
# Find models with most content
modelsWithContent = [r for r in successfulResults if r.get("contentLength", 0) > 0]
@@ -747,36 +847,43 @@ Width: {crawlWidth}
print(f"📊 Total pages crawled across all models: {totalPages} pages")
async def main():
- """Run AI models testing for IMAGE_GENERATE operation."""
+ """Run AI models testing for all operation types."""
tester = AIModelsTester()
- print("Starting AI Models Testing for IMAGE_GENERATE...")
+ print("Starting AI Models Testing for ALL Operation Types...")
print("Initializing AI service...")
await tester.initialize()
- # Get all available models
+ # Get all available models with their operation types
models = tester.getAllAvailableModels()
- print(f"\nFound {len(models)} models to test:")
- for i, model in enumerate(models, 1):
- print(f" {i}. {model}")
+ if not models:
+ print("\n⚠️ No models found with operation type support.")
+ print(" Please check that models with operation types are registered.")
+ return
+
+ # Count total tests (models * operation types)
+ totalTests = sum(len(model["operationTypes"]) for model in models)
print(f"\n{'='*80}")
- print("STARTING IMAGE_GENERATE TESTS")
+ print("STARTING COMPREHENSIVE MODEL TESTS")
print(f"{'='*80}")
- print("Testing each model's ability to generate images from text prompts...")
- print("Press Enter after each model test to continue to the next one...")
+ print(f"Testing {len(models)} model(s) with {totalTests} total operation type test(s)...")
+ print("All models and their supported operation types will be tested automatically.")
+ print(f"{'='*80}\n")
- # Test each model individually
- for i, modelName in enumerate(models, 1):
- print(f"\n[{i}/{len(models)}] Testing model: {modelName}")
+ # Test each model with all its operation types
+ testCount = 0
+ for i, modelInfo in enumerate(models, 1):
+ print(f"\n{'='*80}")
+ print(f"[Model {i}/{len(models)}] Testing: {modelInfo['displayName']}")
+ print(f"{'='*80}")
- # Test the model
- await tester.testModel(modelName)
+ # Test the model (tests all its operation types)
+ results = await tester.testModel(modelInfo)
+ testCount += len(results)
- # Pause for user input (except for the last model)
- if i < len(models):
- input(f"\nPress Enter to continue to the next model...")
+ print(f"\n✅ Completed {len(results)} test(s) for {modelInfo['displayName']}")
# Save detailed results to file
resultsFile = tester.saveTestResults()
@@ -787,8 +894,10 @@ async def main():
print(f"\n{'='*80}")
print("TESTING COMPLETED")
print(f"{'='*80}")
+ print(f"📊 Total tests executed: {testCount}")
print(f"📄 Results saved to: {resultsFile}")
print(f"📁 Test results saved to: {tester.modelTestDir}")
if __name__ == "__main__":
asyncio.run(main())
+
diff --git a/test4_method_ai_operations.py b/tests/functional/test03_ai_operations.py
similarity index 66%
rename from test4_method_ai_operations.py
rename to tests/functional/test03_ai_operations.py
index e4f91587..1341373c 100644
--- a/test4_method_ai_operations.py
+++ b/tests/functional/test03_ai_operations.py
@@ -10,11 +10,13 @@ import os
from datetime import datetime
from typing import Dict, Any, List
-# Add the gateway to path
-sys.path.append(os.path.dirname(__file__))
+# Add the gateway to path (go up 2 levels from tests/functional/)
+_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+if _gateway_path not in sys.path:
+ sys.path.insert(0, _gateway_path)
from modules.datamodels.datamodelAi import OperationTypeEnum
-from modules.datamodels.datamodelChat import ChatWorkflow, ChatDocument
+from modules.datamodels.datamodelChat import ChatWorkflow, ChatDocument, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
@@ -31,8 +33,9 @@ class MethodAiOperationsTester:
self.methodAi = None
self.testResults = []
- # Create logs directory if it doesn't exist
- self.logsDir = os.path.join(os.path.dirname(__file__), "..", "local", "logs")
+ # Create logs directory if it doesn't exist (go up 1 level from gateway/)
+ _gateway_dir = os.path.dirname(_gateway_path)
+ self.logsDir = os.path.join(_gateway_dir, "local", "logs")
os.makedirs(self.logsDir, exist_ok=True)
# Create modeltest subdirectory
@@ -62,21 +65,21 @@ class MethodAiOperationsTester:
"aiPrompt": "Analyze this image and describe what you see, including any text or numbers visible.",
"resultType": "json",
# documentList should contain document references resolvable by workflow service
- # For testing, leave empty if no test image is available
- "documentList": []
+ # The test image will be uploaded and referenced during initialization
+ "documentList": [] # Will be populated in initialize() if test image is available
},
OperationTypeEnum.IMAGE_GENERATE: {
"aiPrompt": "A beautiful sunset over the ocean with purple and orange hues",
"resultType": "png"
},
OperationTypeEnum.WEB_SEARCH: {
- "aiPrompt": "Find recent articles about ValueOn AG in Switzeerland in 2025",
+ "aiPrompt": "Who works in valueon ag in switzerland?",
"resultType": "json"
},
OperationTypeEnum.WEB_CRAWL: {
"aiPrompt": "Extract who works in this company",
"resultType": "json",
- "documentList": ["https://www.valueon.com"]
+ "documentList": ["https://www.valueon.ch"]
}
}
@@ -116,7 +119,7 @@ class MethodAiOperationsTester:
totalActions=0,
mandateId=self.testUser.mandateId,
messageIds=[],
- workflowMode="React",
+ workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
maxSteps=5
)
@@ -125,13 +128,13 @@ class MethodAiOperationsTester:
workflowDict = testWorkflow.model_dump()
interfaceDbChat.createWorkflow(workflowDict)
- # Set the workflow in services
- self.services.currentWorkflow = testWorkflow
+ # Set the workflow in services (Services class uses .workflow, not .currentWorkflow)
+ self.services.workflow = testWorkflow
# Debug: Print workflow status
- print(f"Debug: services.currentWorkflow is set: {hasattr(self.services, 'currentWorkflow') and self.services.currentWorkflow is not None}")
- if self.services.currentWorkflow:
- print(f"Debug: Workflow ID: {self.services.currentWorkflow.id}")
+ print(f"Debug: services.workflow is set: {hasattr(self.services, 'workflow') and self.services.workflow is not None}")
+ if self.services.workflow:
+ print(f"Debug: Workflow ID: {self.services.workflow.id}")
# Import and initialize methodAi AFTER setting workflow
from modules.workflows.methods.methodAi import MethodAi
@@ -139,11 +142,87 @@ class MethodAiOperationsTester:
# Verify methodAi has access to the workflow
if hasattr(self.methodAi, 'services'):
- print(f"Debug: methodAi.services.currentWorkflow is set: {hasattr(self.methodAi.services, 'currentWorkflow') and self.methodAi.services.currentWorkflow is not None}")
+ print(f"Debug: methodAi.services.workflow is set: {hasattr(self.methodAi.services, 'workflow') and self.methodAi.services.workflow is not None}")
+
+ # Prepare test image document for IMAGE_ANALYSE if available
+ await self._prepareTestImageDocument()
print("✅ Services initialized")
print(f"📁 Results will be saved to: {self.modelTestDir}")
+ async def _prepareTestImageDocument(self):
+ """Upload test image as a document for IMAGE_ANALYSE testing."""
+ try:
+ # Path to test image (relative to gateway directory)
+ testImagePath = os.path.join(
+ os.path.dirname(__file__), # tests/functional/
+ "..", # tests/
+ "testdata", # tests/testdata/
+ "Foto20250906_125903.jpg"
+ )
+ testImagePath = os.path.abspath(testImagePath)
+
+ if not os.path.exists(testImagePath):
+ print(f"⚠️ Test image not found at: {testImagePath}")
+ print(" IMAGE_ANALYSE tests will be skipped or will fail")
+ return
+
+ # Read image file
+ with open(testImagePath, 'rb') as f:
+ imageData = f.read()
+
+ # Create a ChatDocument
+ from modules.datamodels.datamodelChat import ChatDocument
+ import uuid
+
+ testImageDoc = ChatDocument(
+ id=str(uuid.uuid4()),
+ documentName="Foto20250906_125903.jpg",
+ mimeType="image/jpeg",
+ documentData=imageData,
+ workflowId=self.services.workflow.id if self.services.workflow else None
+ )
+
+ # Create a message with this document
+ from modules.datamodels.datamodelChat import ChatMessage
+ import time
+
+ testMessage = ChatMessage(
+ id=str(uuid.uuid4()),
+ workflowId=self.services.workflow.id if self.services.workflow else None,
+ role="user",
+ content="Test image for IMAGE_ANALYSE",
+ language="en",
+ timestamp=time.time(),
+ documents=[testImageDoc]
+ )
+
+ # Save message to database
+ if self.services.workflow:
+ import modules.interfaces.interfaceDbChatObjects as interfaceDbChatObjects
+ interfaceDbChat = interfaceDbChatObjects.getInterface(self.testUser)
+ messageDict = testMessage.model_dump()
+ interfaceDbChat.createMessage(messageDict)
+
+ # Update workflow messageIds
+ if self.services.workflow.messageIds is None:
+ self.services.workflow.messageIds = []
+ self.services.workflow.messageIds.append(testMessage.id)
+
+ # Update documentList for IMAGE_ANALYSE test
+ # Format: messageId:label (using documentName as label)
+ docRef = f"{testMessage.id}:{testImageDoc.documentName}"
+ self.testPrompts[OperationTypeEnum.IMAGE_ANALYSE]["documentList"] = [docRef]
+
+ print(f"✅ Test image uploaded: {testImageDoc.documentName}")
+ print(f" Document reference: {docRef}")
+ else:
+ print("⚠️ No workflow available, cannot upload test image")
+
+ except Exception as e:
+ print(f"⚠️ Failed to prepare test image document: {str(e)}")
+ print(" IMAGE_ANALYSE tests may fail")
+
async def testOperation(self, operationType: OperationTypeEnum) -> Dict[str, Any]:
"""Test a specific operation type."""
print(f"\n{'='*80}")
@@ -180,7 +259,7 @@ class MethodAiOperationsTester:
parameters["documentList"] = testConfig["documentList"]
# Ensure workflow is still set in both self.services AND methodAi.services
- if not self.services.currentWorkflow or (hasattr(self, 'methodAi') and hasattr(self.methodAi, 'services') and not self.methodAi.services.currentWorkflow):
+ if not self.services.workflow or (hasattr(self, 'methodAi') and hasattr(self.methodAi, 'services') and not self.methodAi.services.workflow):
print(f"⚠️ Warning: Workflow is None, trying to re-set it...")
import time
import uuid
@@ -196,20 +275,26 @@ class MethodAiOperationsTester:
currentAction=0,
totalTasks=0,
totalActions=0,
- mandateId="test_mandate",
+ mandateId=self.testUser.mandateId,
messageIds=[],
- workflowMode="React",
+ workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
maxSteps=5
)
- self.services.currentWorkflow = testWorkflow
+ # Save workflow to database
+ import modules.interfaces.interfaceDbChatObjects as interfaceDbChatObjects
+ interfaceDbChat = interfaceDbChatObjects.getInterface(self.testUser)
+ workflowDict = testWorkflow.model_dump()
+ interfaceDbChat.createWorkflow(workflowDict)
+
+ self.services.workflow = testWorkflow
# Also set in methodAi.services if it exists
if hasattr(self, 'methodAi') and hasattr(self.methodAi, 'services'):
- self.methodAi.services.currentWorkflow = testWorkflow
+ self.methodAi.services.workflow = testWorkflow
# Call methodAi.process()
print(f"Calling methodAi.process()...")
- print(f"Debug: Current workflow ID before call: {self.services.currentWorkflow.id if self.services.currentWorkflow else 'None'}")
- print(f"Debug: methodAi.services.currentWorkflow: {self.methodAi.services.currentWorkflow.id if hasattr(self.methodAi, 'services') and self.methodAi.services.currentWorkflow else 'None/NotSet'}")
+ print(f"Debug: Current workflow ID before call: {self.services.workflow.id if self.services.workflow else 'None'}")
+ print(f"Debug: methodAi.services.workflow: {self.methodAi.services.workflow.id if hasattr(self.methodAi, 'services') and self.methodAi.services.workflow else 'None/NotSet'}")
print(f"Debug: Is same services object? {self.services is self.methodAi.services}")
print(f"Debug: services id: {id(self.services)}")
print(f"Debug: methodAi.services id: {id(self.methodAi.services)}")
@@ -283,13 +368,36 @@ class MethodAiOperationsTester:
async def testAllOperations(self):
"""Test all operation types."""
print(f"\n{'='*80}")
- print("STARTING METHODAI OPERATIONS TESTS - DATA_GENERATE ONLY")
+ print("STARTING METHODAI OPERATIONS TESTS - ALL OPERATION TYPES")
print(f"{'='*80}")
- print("Testing DATA_GENERATE operation type...")
- # Test only ONE operation type TODO
- await self.testOperation(OperationTypeEnum.IMAGE_ANALYSE)
- print(f"\n{'─'*80}")
+ # Get all operation types
+ allOperationTypes = list(OperationTypeEnum)
+
+ # Filter to only operation types that have test configurations
+ operationTypesToTest = [
+ opType for opType in allOperationTypes
+ if opType in self.testPrompts
+ ]
+
+ print(f"Testing {len(operationTypesToTest)} operation type(s):")
+ for i, opType in enumerate(operationTypesToTest, 1):
+ print(f" {i}. {opType.name}")
+
+ print(f"\n{'='*80}")
+ print("STARTING TESTS")
+ print(f"{'='*80}\n")
+
+ # Test each operation type
+ for i, operationType in enumerate(operationTypesToTest, 1):
+ print(f"\n{'─'*80}")
+ print(f"[{i}/{len(operationTypesToTest)}] Testing: {operationType.name}")
+ print(f"{'─'*80}")
+
+ await self.testOperation(operationType)
+
+ if i < len(operationTypesToTest):
+ print(f"\n{'─'*80}")
# Print summary
self.printSummary()
diff --git a/test3_ai_behavior.py b/tests/functional/test04_ai_behavior.py
similarity index 82%
rename from test3_ai_behavior.py
rename to tests/functional/test04_ai_behavior.py
index 76db986b..f138e57c 100644
--- a/test3_ai_behavior.py
+++ b/tests/functional/test04_ai_behavior.py
@@ -9,30 +9,28 @@ import sys
import os
from typing import Dict, Any, List
-# Add the gateway to path
-sys.path.append(os.path.dirname(__file__))
+# Add the gateway to path (go up 2 levels from tests/functional/)
+_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+if _gateway_path not in sys.path:
+ sys.path.insert(0, _gateway_path)
# Import the service initialization
-from modules.features.chatPlayground.mainChatPlayground import getServices
+from modules.services import getInterface as getServices
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
from modules.datamodels.datamodelUam import User
+from modules.datamodels.datamodelWorkflow import AiResponse
# The test uses the AI service which handles JSON template internally
class AIBehaviorTester:
def __init__(self):
- # Create a minimal user context for testing
- testUser = User(
- id="test_user",
- username="test_user",
- email="test@example.com",
- fullName="Test User",
- language="en",
- mandateId="test_mandate"
- )
+ # Use root user for testing (has full access to everything)
+ from modules.interfaces.interfaceDbAppObjects import getRootInterface
+ rootInterface = getRootInterface()
+ self.testUser = rootInterface.currentUser
# Initialize services using the existing system
- self.services = getServices(testUser, None) # Test user, no workflow
+ self.services = getServices(self.testUser, None) # Test user, no workflow
self.testResults = []
async def initialize(self):
@@ -41,30 +39,38 @@ class AIBehaviorTester:
import logging
logging.getLogger().setLevel(logging.DEBUG)
- # The AI service needs to be recreated with proper initialization
- from modules.services.serviceAi.mainServiceAi import AiService
- self.services.ai = await AiService.create(self.services)
-
- # Create a minimal workflow context
- from modules.datamodels.datamodelChat import ChatWorkflow
+ # Create and save workflow in database using the interface
+ from modules.datamodels.datamodelChat import ChatWorkflow, WorkflowModeEnum
import uuid
+ import time
+ import modules.interfaces.interfaceDbChatObjects as interfaceDbChatObjects
- self.services.currentWorkflow = ChatWorkflow(
+ currentTimestamp = time.time()
+
+ testWorkflow = ChatWorkflow(
id=str(uuid.uuid4()),
name="Test Workflow",
status="running",
- startedAt=self.services.utils.timestampGetUtc(),
- lastActivity=self.services.utils.timestampGetUtc(),
+ startedAt=currentTimestamp,
+ lastActivity=currentTimestamp,
currentRound=1,
currentTask=0,
currentAction=0,
totalTasks=0,
totalActions=0,
- mandateId="test_mandate",
+ mandateId=self.testUser.mandateId,
messageIds=[],
- workflowMode="React",
+ workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
maxSteps=5
)
+
+ # SAVE workflow to database so it exists for access control
+ interfaceDbChat = interfaceDbChatObjects.getInterface(self.testUser)
+ workflowDict = testWorkflow.model_dump()
+ interfaceDbChat.createWorkflow(workflowDict)
+
+ # Set the workflow in services (Services class uses .workflow, not .currentWorkflow)
+ self.services.workflow = testWorkflow
async def testPromptBehavior(self, promptName: str, prompt: str, maxIterations: int = 2) -> Dict[str, Any]:
"""Test actual AI behavior with a specific prompt structure."""
@@ -79,24 +85,30 @@ class AIBehaviorTester:
# Use the AI service directly with the user prompt - it will build the generation prompt internally
try:
- # Use the existing AI service with JSON format - it handles looping internally
- response = await self.services.ai.callAiDocuments(
+ # Use callAiContent (replaces deprecated callAiDocuments)
+ options = AiCallOptions(
+ operationType=OperationTypeEnum.DATA_GENERATE
+ )
+ aiResponse: AiResponse = await self.services.ai.callAiContent(
prompt=prompt, # Use the raw user prompt directly
- documents=None,
+ options=options,
outputFormat="json",
title="Prime Numbers Test"
)
- if isinstance(response, dict):
- result = json.dumps(response, indent=2)
+ # Extract content from AiResponse
+ if isinstance(aiResponse, AiResponse):
+ result = aiResponse.content if aiResponse.content else json.dumps({})
+ elif isinstance(aiResponse, dict):
+ result = json.dumps(aiResponse, indent=2)
else:
- result = str(response)
+ result = str(aiResponse)
print(f"Response length: {len(result)} characters")
print(f"Response preview: {result[:200]}...")
# If we got an error response, try to extract the actual AI content from debug files
- if isinstance(response, dict) and not response.get("success", True):
+ if isinstance(aiResponse, AiResponse) and aiResponse.metadata and hasattr(aiResponse.metadata, 'error'):
# The AI service wrapped the response in an error format
# We need to get the actual AI content from the debug files
print("⚠️ AI returned error response, but may have generated content")
@@ -129,7 +141,9 @@ class AIBehaviorTester:
accumulatedContent.append(result)
except Exception as e:
- print(f"❌ Error in AI call: {str(e)}")
+ import traceback
+ print(f"❌ Error in AI call: {type(e).__name__}: {str(e)}")
+ print(f" Traceback: {traceback.format_exc()}")
accumulatedContent.append("")
# Analyze results
@@ -151,10 +165,11 @@ class AIBehaviorTester:
"""Get the latest AI response from debug files."""
try:
import glob
- import os
- # Look for the most recent debug response file
- debug_pattern = "local/logs/debug/prompts/*document_generation_response*.txt"
+ # Look for the most recent debug response file (go up 2 levels from tests/functional/ to gateway/, then up 1 to poweron/)
+ gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
+ gateway_dir = os.path.dirname(gateway_path)
+ debug_pattern = os.path.join(gateway_dir, "local", "logs", "debug", "prompts", "*document_generation_response*.txt")
debug_files = glob.glob(debug_pattern)
if debug_files:
@@ -357,3 +372,4 @@ async def main():
if __name__ == "__main__":
asyncio.run(main())
+
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 00000000..d5b31da0
--- /dev/null
+++ b/tests/integration/__init__.py
@@ -0,0 +1,4 @@
+"""
+Integration tests
+"""
+
diff --git a/tests/integration/workflows/test_workflow_execution.py b/tests/integration/workflows/test_workflow_execution.py
new file mode 100644
index 00000000..35142de6
--- /dev/null
+++ b/tests/integration/workflows/test_workflow_execution.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+"""
+Integration tests for workflow execution
+Tests full workflow execution with state management, Stage 1/2, document extraction flow.
+"""
+
+import pytest
+import uuid
+from unittest.mock import Mock, AsyncMock, patch
+
+from modules.datamodels.datamodelChat import ChatWorkflow, TaskContext, TaskStep
+from modules.datamodels.datamodelWorkflow import ActionDefinition
+from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentListReference, DocumentItemReference
+
+
+class TestWorkflowStateManagement:
+ """Test workflow state management during execution"""
+
+ @pytest.mark.asyncio
+ async def test_workflow_state_increments(self):
+ """Test that workflow state increments correctly during execution"""
+ workflow = ChatWorkflow(
+ id=str(uuid.uuid4()),
+ name="Test Workflow",
+ mandateId="test_mandate"
+ )
+
+ # Initial state
+ assert workflow.currentRound == 0
+ assert workflow.currentTask == 0
+ assert workflow.currentAction == 0
+
+ # Simulate workflow progression
+ workflow.incrementAction()
+ assert workflow.currentAction == 1
+
+ workflow.incrementTask()
+ assert workflow.currentTask == 1
+ assert workflow.currentAction == 0 # Reset when task increments
+
+ workflow.incrementRound()
+ assert workflow.currentRound == 1
+ assert workflow.currentTask == 0 # Reset when round increments
+ assert workflow.currentAction == 0
+
+
+class TestStage1ToStage2Flow:
+ """Test Stage 1 → Stage 2 parameter generation flow"""
+
+ def test_actionDefinition_needsStage2_logic(self):
+ """Test needsStage2() deterministic logic"""
+ # Stage 1: No parameters
+ actionDef = ActionDefinition(
+ action="ai.process",
+ actionObjective="Process documents"
+ )
+ assert actionDef.needsStage2() is True
+
+ # Stage 2: Parameters added
+ actionDef.parameters = {"resultType": "pdf"}
+ assert actionDef.needsStage2() is False
+
+ def test_actionDefinition_stage1_resources(self):
+ """Test that Stage 1 always defines documentList and connectionReference if needed"""
+ docList = DocumentReferenceList(references=[
+ DocumentListReference(label="task1_results")
+ ])
+ actionDef = ActionDefinition(
+ action="ai.process",
+ actionObjective="Process documents",
+ documentList=docList,
+ connectionReference="conn123"
+ )
+ # Stage 1 resources are set, but parameters are not
+ assert actionDef.documentList is not None
+ assert actionDef.connectionReference == "conn123"
+ assert actionDef.needsStage2() is True # Still needs Stage 2 for parameters
+
+
+class TestDocumentExtractionFlow:
+ """Test document extraction → AI processing flow"""
+
+ def test_extractContentParameters_structure(self):
+ """Test ExtractContentParameters structure"""
+ from modules.datamodels.datamodelWorkflow import ExtractContentParameters
+
+ docList = DocumentReferenceList(references=[
+ DocumentListReference(label="input_docs")
+ ])
+ params = ExtractContentParameters(documentList=docList)
+
+ assert params.documentList is not None
+ assert len(params.documentList.references) == 1
+ assert params.extractionOptions is None # Optional
+
+ def test_documentReferenceList_parsing(self):
+ """Test DocumentReferenceList parsing from strings"""
+ stringList = [
+ "docList:task1_results",
+ "docItem:doc123:test.pdf"
+ ]
+ refList = DocumentReferenceList.from_string_list(stringList)
+
+ assert len(refList.references) == 2
+ assert isinstance(refList.references[0], DocumentListReference)
+ assert isinstance(refList.references[1], DocumentItemReference)
+
+
+class TestDocumentReferenceLookup:
+ """Test document reference lookup across tasks/rounds"""
+
+ def test_documentListReference_with_messageId(self):
+ """Test DocumentListReference with messageId for cross-round references"""
+ ref = DocumentListReference(
+ messageId="msg123",
+ label="task1_results"
+ )
+ assert ref.messageId == "msg123"
+ assert ref.label == "task1_results"
+ assert ref.to_string() == "docList:msg123:task1_results"
+
+ def test_documentListReference_without_messageId(self):
+ """Test DocumentListReference without messageId (current message)"""
+ ref = DocumentListReference(label="task1_results")
+ assert ref.messageId is None
+ assert ref.to_string() == "docList:task1_results"
+
+
+class TestJsonParsing:
+ """Test JSON parsing with broken/incomplete JSON"""
+
+ def test_parseJsonWithModel_with_code_fences(self):
+ """Test parseJsonWithModel handles code fences"""
+ from modules.shared.jsonUtils import parseJsonWithModel
+
+ jsonStr = '```json\n{"action": "ai.process", "actionObjective": "Process"}\n```'
+ result = parseJsonWithModel(jsonStr, ActionDefinition)
+
+ assert isinstance(result, ActionDefinition)
+ assert result.action == "ai.process"
+
+ def test_parseJsonWithModel_with_extra_text(self):
+ """Test parseJsonWithModel extracts JSON from text with extra content"""
+ from modules.shared.jsonUtils import parseJsonWithModel
+
+ jsonStr = 'Some text before {"action": "ai.process", "actionObjective": "Process"} some text after'
+ result = parseJsonWithModel(jsonStr, ActionDefinition)
+
+ assert isinstance(result, ActionDefinition)
+ assert result.action == "ai.process"
+
+
+if __name__ == "__main__":
+ pytest.main([__file__, "-v"])
+
diff --git a/tests/testdata/Foto20250906_125903.jpg b/tests/testdata/Foto20250906_125903.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..beefbd87d9cf5515515cdb41d0dd0475944844d8
GIT binary patch
literal 2074421
zcmbTccUV(P*Ebw`M|xK&p%Z#+GMX5>^
zL=aT`@SJm>`}yAM`QyFb$)1_tUbEKPYt5{+lk7demwtZ$P-$y`H2^p`007R-2k`r2
z;gouiy9)rIs|yeS006`Q@*6VV4Z^wAbJO7bjfq$O5CFJ;aR2}XI8=XOlAH5#9Gbs*
z{^-MD0TBF=y{Qd2tpDP69KL^eZZzPC|0PRz!_$LH`(N2t?g0FMWanbI|AX;}Z_LT@
zwTJrp!(0Kzx*$y%Sy9a!J^(=LAMU?;aZzz8c~LoeQE7HDae1j5APE4lN9O#;23)Ji
z{C}}S4CjCCiQ)YR^8o-U007aAR(w%$anTgK|JaG^k%Ip(j!602_ZvQ3cnbAj^glQ!
z73XhR99%pA90x%9w{^J1aNK{dTMftim)-=&{}*?{3I65jgA@ICPT(Z};2ULd(tmlz
z;3@xJcLtvNFUG*(|Kg3p|M(&%Dq8r@I{sOiLfrq%?X8|dy#HY0l|q95VBF8ejQ_Qv
zrUm>pm;cb|nf~xeNJ@yxi30%tXODYh&YwWWHtoBWqAf8hTy4!{wUlM$5{6}vfZMoDgl|H%v-m79$GPnNXc
z{AUFSZ&duRS%^>m-z>zZ`=42e`>!5+Hr#(BBCq;y%l`kl^?T_z77zu%``-#a9^Rkh
zKZTG0AD;jp51)XDfPj#Ykcg0gh=hcQjGT;;k`@P_;AZr%LU_~O4FB(i=!Q&0L`-~B
zNy$iw$tWnusHmyv>Dhkw0H}!pEjTK8IGg}nY8*UjoZkaDH$i|yfb&=R$9TX^3gQzG
z5)qS-lHDXL4jvvZKHi^e-*ATiNl$!g0vZmn`-HTH_C%cCbmGrbi-@^Y8++-E-hTl~
zIQT@8kTNhbF|%;<@bd8sNJ>e|$jZq-P*c~?)BCKbcXamk4-5_skBpAZ%+Ad(
zEG{i$K5lGoZSU-U`n-2?`t|JF`Nj9ktD9W?+nt;CZ;$?mKQ}iV99;aH87BJU4-RhN
zA7`oY2{^^m*?ZG+ia#f&Q%x;u>?PrnF#1C8;Pal80Vw&A`{a*He|hwO=TPMT
zlSltL^shg^F#vMBo1CM@qXygq*y@-k0ULw4n^D>IaQBZXB*IYS@z0?+hL%~NU~9>?q1Ibikx`_YJ%vZ3
ztvVuM6hGRFG=buj6Dp4Lj_6*{osFjTL57K$Ar8J2?5Nb)?8ch45nUt$KQTq)c{K2l
zU=cJ089Mm+Ah-yu@PLa*9b%ae*BLj!d2A=mrxu^iiq?#b+Pb
zoc>gNyh~gii_JiBzqJ&+myxW}bdr<_>hkp4giI22ncHg*3MJvBv7^RD^kzzw%v2
zHJ?3s{deD;jlYgD9{-qqs541FSdG1PNJOiaNkuXvHw&Bh&k*(fDsVuNdC%`s8M%ox
z9>{b#CmXO1s$hX4bAb>53Ma}3EYWsZ23NPyT1WSC!NeiT>y{>9WLLh%#^_FGYc$40
zbcLCN6KwRbJ$S~l&ReQ9kn~%W^!km9FtP6{dEs=_rWvEcR
z78_NFHLK^?XeKw6n;Do2it{QjT$eeqXC6;Av9i+1S#*CC=-VmDXPlcM??L0^V-*G}
zi?F_Ey-<3%XKXQCy7B;u+b?E$J;|nQ0||yTNod$w3$#-{fbl5OzRpc{_!5}VD!8Ok
zR%v%1)FQnx31xJ%)bdJE)Vu3D<@DMSW)PsEK}Hz?oB1Yk=2kmllU0i4>3Q9`Z9!|ew21Kzn$}#L8lkt;
zvE$wpALlCWw&a5~#9MOXy^X*voo`^Mq~iKl(E^IiVCA1xEjbH|wqeM4t@xtOWp(hP0|G1A};-&OVeoo#rW5N-%|JM*Y4j9YBfz%i$I
zaJIt($$QcpknwNP_dHIKw&dp01|$O~QY|TR!h`vBT&B#9r>Kd^WOVyt@CL;L>m-t6
z$umwv`n<)o2jsprDW94iI+_QbzJr-Qu1Y5>XzlB*oUYmnaYmGYTFFaq)7s
ztI8R~FMvL$6klPW#@>r6?C6ZVt-N4BPV+#zNLE$&XOx86SL)cuE~7Se8t1!gqPew&YSf!my8#@;
z_Ny#x)!
z^Ew?)6m-hg)cBt&?U)9BY@+i5Y*bxZ$nBmYb?;idZxw8ILMhZKj&dYACzi-4gGrr}
zw=t&E6YWDENbmp~5>Fkju4@W)s9QUl;Nu5Seo|EGnq}*IH{WrluGsls(s94w2>VDR
z(^y83N!0d8o>R^a_>I|>0x#q!%;rm0)}x%<__*$z1q|eS`d3sQxlwkLSVH){U;g7O
zL5P(-V~ll|J%~99$&P8#sK|^(+4uB|kdtsd@2%Q8kuFGtvWc}^(2miUtQ4}Z+*g#|
zl{HhOD8%*M-+5YV$=EVrR3HKma)mmqw=_5VrOV~bMCd>cAiwgHwR01NosDf-E}jmC
zTA#=~n+n=D9;&3&i^#80m7OsGb0t9i%xx09dZ<(PUl@UEWNdIg39jxY6qhjujoK=I
zKJfAxmCl)`Lx^$ou!nn$VF{FcF9A=jx)5XCVy}%SA3CZir0rYE`RP9}y)Cse=|n2~
z;Y#?aod<4r46tD9#TDs1Uw-o?jv>8X|JNcVl8AEq1YcB=!d!%Ph)b(>l@dQ-ScPbz
zCTlXgk9NiA@_}qkZEa}sh;dlSzTO43@fc?wmz0;MwtK*rbBRv$Qw5G&g=wknZrgX1q^xw9#y|Vozh#`Z)|ZAUAEF3bG@Fw4*!X
z7MSzs#5M6IQqj<_iVYUUoA3e3XRfJ(Vdf_lgBac4fS9wYL#qvl>}G4cxwH)Nm!=~c
zpoan154vkXQ3fDlauJZLO
z2z2u#5NR7;5VYaa8L&cTqv1H>|D35xW69(&HujU2U353+EmlhoQs(Q4nPJIAYHin`
zB61!-(yC6~rjd`?=1fN7A?Bk->W&|Ofxq#*KG^3zICjLPOV@!&x;YuCtT%}_(z6D>
zVkg>z=qcIlF=x*YwvCh(XsM))eH{m%zXWxZSd6RU=2c+^;H#QDe$6)16h{6HOR;(x
z?xC72@i{VG`4++}i;Eeb-hcb%W#REY!By8+L$L~Z2Vod!-8!?2IcISmi^->1g+Vbo
z0V~{a&J<_G9(s*)O3g=nRU?BQ9(V4B>*$-JeY3a{H{naQfe#yAViIY8B3bWPvI0J8
z0O)fIA-)1aIuI&BDpD7;vSn+$K<$`8VnQ|H)(G#;HJ4?Y#~aBR|D~Or>sgao&>`%o
z?vQ3xa-7V~F{;HNn?=LD-pWkpQ~W*=NzHMpuPnujGN(iuzQ6%I;Img0t*d1ejAP=v
zz{TC~*%}4PWfo;tIT^>S<+JnU`Nz=w+3w`0YZ=7q^{#@@Ze-bYgjwSXBv2Z&`tvhk
z3DntFU{LBXs*JrOlaQL0nSQ0^i48p4CGa6sB;f;Q-91F8ij;V}wSYHn(-4*Ks1qN{
zxRaoPwPSWv^E>UioXdZaYN;%kY1Gvc4W;KrHKHu_R
z#o;2ti+&HIE@dIu#Xw(AH+M4>&zB)o+fKCyw{j^wX`8ti6T7BC-Y8zL)7$(GrdAmO
z(&gLynwlV6Y*@AY>Z%|ek}f?Y;#I{e2BV)?%>7EeNI1R|{O7#W3wJ@C%aq5WMxatc?
z^y^k8tKx_@@#a!B7>5G%?Y>FQ4HxE@
z0ZBndm#<$J4SV2Xwne0Lr9}LO3Le++W;;UW10iyUnm!D9!+q#0k
z{Jf>`Ib?pm`xMh09(m2F{QfTDSPW9?GLV!@S|Su|nBa=rz{6rXlpZmuT;VOMwe=ei
z`zjOiWEQKjV?oW}Q2Ii$Tq5$6cv)jM?`vn+UH9%demm8shbdg-;vB+7`5Eb+@sE8Q
z4zCt#i;ilvO)kR_jkYXsXuT5COvt`dH2=g*=0tATJ359H7FELgF657Z1+VhhSk7*d
zS$LSgf&OgM_qB&PM<0|{LDtuO7JnSP;$}AXeWz%zsLmT%^_W}88U&ct_@gSr2q-g*>3>VXN>dU(tOJhG(cj>~hJ0wuZXzYUQ>xe~vt4@;cSP8gSJSd}>PF;l@_*+&zER76Qp6I`v22Kqb9AmZfOM)?NcKL$l=C@%Bd
zMYHC}^*{5yLlB^=k-_t!1Jx9`e}R5fD7k=mU(iw5!!QP%+u%HWxai^r=SL93im9xJ
zZZGu<>adnXwFb*DPkr!yF}LC_VTsYK39{XugL&>STtDoSWsm0Db&r@OXMYU`P>ugtR)mdjcUA}wUun>x7`<3jJj0DJnrJXyD9hJLOm
z0(Y75&COVuji@WMsamVGwHd=uUf$`>^f1rBfN$0%Ea~PFyQ|3gg3M0$t24=`EfiA~
z0@mxl%4K2D!Z?=|25^E?SR=#ZliEbi5<=sY?AtXp?+y#x^oqP)ePQ>tN-s;=vrD4Y
z)RRQ%!m0&UM>S&r%MHGPH*B19ks^Xtr@+2R|#_
zV^M2lNMjjG-p}t(L+nls;V;vo5uQ0=$j{E>5uL@n+8&aw+IBM%Np+JL!+>F#3Kixi{oYm!Y>|m%u419>LUEL>gw#@WP5VFai|`cVC%rL8Qwc)r^dUR
zxlVuP83lzwCDY?Jv>bzN#u_!n$9ZNdEz7BktEY4#drbh+N!~d>gi4qb@*2KdPD#0i
z@&?QrSMX_&A#}01wzOb>WnX611bXcxN_x4rnW@A5?bVfiiaf(g5GUWg*lHstS`OCW
zZ^ORey}|pRv?H~)0?RLNeInX^J@uwF(5GW_^vuAJuj%NM>_z$I
zgO0lu-;mUlE8BtL!PkB->7L>yS6ye6Ek0mqoh#ZFse5Xrn0HZw4hQ_K#H?=_`jFab
z9p`u@YI=S>s}8AaEOSGg+InQZvr?yt_7rR8AonVAG_Z}o1HenO37;OkM`S%LShhMp
zu10k!ijmjqosl6|JQgA#lL4k)SSMxV?afx4%q1vUX{3~yJtg4R4d(d`pnazTbSyll
zLo1he8SQTB=8SJIe>&Ao{0$Jwl+9@4_ic5IeRb8HAh#>aTxaz(wtyGmoB{iYdiikT
zi>6b`fl2Cp3a;Rrud+|{jRnDSb^M!OT2?#q)-Q~Iu4ZloXcELCP7EHcJ>B;GZ17^H
z>07?4(w!m5kEm4$z^vVWpH4lprTv~Dfe;Bn=JSO&Zm5$t9Ryo4!zTGVaNC1S;TbGP
z`YX5RLkv8Ry=q!*y%@_JzVB|B$JDc@sZ-qkHaT(EEt&~gD5_6)f1;uKk~QkJ$O=Jx
zG+D}ud4BAa=p4U7UvZT}r4@h81+)cjjm}T^8T%$MycSi&D(5kKroA3J9j5U;@%$0B
zuA(@FzB(FZ7K<#gc6HB)URouG54Y;|+2D2Edzfa&0b0mq%RII680VfqQQ6(kDjjLJ
zT+NPkGt59Rdw!T>LUGw52t3Y!IFA0EueX)jXu_qLo}|`!r6@H&rfv0QeCl
zDrl-a_M>9}8VKrXjkDZBF@^%O9bK?Hl5YCqnsn7;WPClma&=J9M#p2*a!!Eq
z%o2BkhD!BV)a^pa7mCl50v0w_R;-qP1Dt!#Pc=b%E4586!~}B!qus87OCvFZVLhgz
z`VVaC_ytu3{r#IzwuoP0lLu}0)D-zV>ycY=(kRP;vLl+E4P##(YD0{4OlMD#IXmIV
zCXVm(caN6V$po|f(0&D%r_P8SNp#p4T(Ip!3^y-vcUw^p6a6-ZesViq=*i3)X2Wcm
zn0jm-{rtAch-#6!pV4diUlTOe?2DzVL7TcWEll${k_((o&sj8IOrpNJayZhU5G6uI
zNh@K64O70!fp~d_2$u4@wB}2uQNWeedWBEva=90)SSyRu6IrlEmxrqIj5u6df$ujU
zZlTK$<-7eGK(AT#z@I1AU-_~~M*3h;OunbNtMT&XdC~FTPN|LXg;
zCEv=F?gO?t^mJWkA2VE>-oc4!-rrw5QPS^UIM=OcvhlF0dmWPd;f2NQ(LPGL
z6LbZ~U0J#qG=XKisaXZ#g9rk?kqEckZQ=)nz|?eLm1%l)+fnx_ms390T>A>OnpO-|
z${r4r?FiSmg}*PR9dYVNg76FDGC#op`Ou}tr`Xjr4nx{@N`M27RBH9E4tl=U(H(U5
z12Xpj7u$ELiAT%YVe0Mxx>Z0m(lj>BpDdChs9}AV>bvT`F1!b`65a}H`TkLeVjwe9
z+mXVf(sZXCLEOJJ^1dq@BWGyyrn!D?U%IU-l&<(lT`O?($*AP}%vI)q@I#?A+2BXx
z3OWr;eQ}=>g*}owo6CTRe6~jgnTr)(PwD4#01_8W2L@GSr>|fRF!YcxsLN7C*}_?);%9+z_rJrU8ByZ_V=E3TcPM|
zj@fTuck+w#88u|G586v^O@p1W@O2mK<M8exO*-4*(V+n1=Z$S>(b|#`dcYWyvR8)I?Ho1cFpO4h-_62mBypI(&
zSy?67ZHpeP@Hau3fSq&PHgso$_us5M&SQ9p>uou{8kznZ5VVo0w`Sh7XyAcq^%Z|g
z|A{iB$v25|WkE4&{zv7nA5nLCa3&Id5O0C_4{X?g!
zPNxeyiaI1IZ3_<%J+pNPO)nb&$Z1wLm#J~x?&h{O21dZo1yUrGyi&0xLAE1zQhXSY
zJ4tSOaW|{MW8<(SyBH2+ALr6AlGGHx2OV$(u=m_1S^u14aPAYC&RJY{Soh
ziVLBfs#grk(JCEIs{Dw(%V=A6vI?xp&|`IJ@EBKY3Z
zJ0mELc^PW*m^DN5=!awRZTj6iBC=dkg4VVLkpcTv7%VE%)Fmf!c6?at|1r|B6fLW2(K)O;LU3_l4-%hGAzUDU0Oo6mFIa%-DP^9W=R
z$EtnRh3%Yk!|C&M1u1oOxD;NgQ7!{;?EF0hLb=1!Lf%T(Zwc2tz)G?Dy82Q+_Yb&&
z)?jpfor4;%)8%XGq~l)iCg~@;;1WU$ySSEMRjvTTeskrVf*8M09ruZ$DY`fhh@@);E<
z28P$-aYVbDPoOfK+OW~y?c0V8+KUA??rv?2=4Nd&h@l;_O}7gu3$Se=o@_YijUzUy
z6|p)$nrNByfD$k)vq=_omP|Q_&(cxwv)5B}K+`sGMw%gtA1CUTr~}d1772@{oR}VlF}cwy9jzhO2<=nANC#
z&6x6xShD!e_Iz)LOueA~nkW?G=9D~=v%vtD3`F35fg0Qz>uKk?mswwBq$iPdG`qnV
zcQ%iTSx2sTn8>e_1v^nkrWU2Zc^?lrDm3|`Vp5A{W)DpyK(cM#-#~D6?04}L75%u
zTB$asMbK-VGD4rk9N|z=HCk(Hz6mNNV|CAYcv_KHY$<{o!Us|Y^wn|!GcU8hn@r6X
zD0{cgwNA=_=Ry9~5Uji3eyH4L+Z=T5_U0kA)o(ye2H_UgPokG-`{udyxAgh4XF$&R
zGL%0=#}x38i(QRNT|=u(*aYST_*sJK-c26n<2_{mjNH@AbgB@n%VoJl0F
z^sf%R-?o`=4Ci=BR*lUbc1w^FB`PTeElTU3s>fKbgmtXPz5739K3w
z^2qQjJF0RkK%cIH+MaUN6;LMF1iPv8{#uQ=@GYAI)xqz!PJ3M-d9!Sf?q{z=);W1u
zz^l4_Up;}1;?Yl!O!0ykF8oQcU2rY)UD7>Kb5Nu#&MWqf`sJWkdb2H~PIE^k1!f(I
zTal08S`}_#J@~%hov^rlYJfKFxbf>;DKk%IYjI>-K2#C}LJp*qB_o`ZjzotV8XE^lwrF|iSV?OT!GkESRvQnAep4zpt~8J0u<$8Ct;0;kWA1)ov_0#!BU@Q
zqL)BV5idv1CIY?cz6BLc-8em8y*Q8qf02EYf-&7K-(79CL5|@2m`^uv5n?hNL>XU<
z)R}9}v|fI*X8lnau?jn}Y>#d@iS0LIgM-s}kh^{-Ol9a2T&;d*Q@CO&DI1p9q2
zM}$^=BVd#vNY_A&6av+>n7Njz!_m0VzO59N=HNDAQ-Qt=dD9Y@GgHM82$t7TfL1s?
zZmgIugm)ibUa`m+47=ms!=T;)%iR;iH2I!&S<}BSmF=rD1SkdG$={!nx_N+yWo?In
z>LU6*&y$e%T|kU8P_Mw)t7YMt+7g
z4Qam;%?~Q=-dDFAj=+4g^_MBrkX)Da>sZQvpl7|4UmLb6^>c|k{c~#n4r@!cPD6n^
z4T*uA*rn$w-Pk7mxb3g}Iu{0J1}J}MkaOBapUjUUteaUF0YS8Q2#&?Zkv;P(&~_Ta
zC=hRvd1MpG*)Ds}?`i0F8DWXJU-A(qb0)U4q1J(@il-9A6YZy?5U%9T{e&a-NG;Z9
zJ?xddcx-I`HHNXpsk}kV2u|Cs593T+GyrXSj)w8mUB$SH9h85PQ5G?>$rM)wEGNe+
z*L&7O*b0(N47rwuP~0-=OnUNKd|_eJeS)o9zR>b&6#z9F3FJx?08x^
zwu>CUPaUnZ-)%4sZ2MA{@#&crBb*SebK=lz8CLa7T(OpLp6Qe3#FWnqos725ROP@GX&3my_A
zQho2#W-(geE+hP+d0?ryB)BZ_6vfZd&N
zfnViGYoT%}x&&R#>Ri}D<8uT+zQetABGkHKY<}`n$Xe8sw@o1R@94Pwsi3;9nwnq^
z%f|$ElBSx@Mp`Zkrb8M&zFqYC_Hy^v#9!-ZFX-5vxS!4Kgke5AO(x$wl8#QpXX&bx
zJh}a$X-@dMpR&kV`n4~aEJDH6m_{*@w%vI<^$m2q#WAp7G|^bykz%#HDA37R2Mltt
zwl-fS6C(5_LFMf5ak>23XnM%vqx^*2Kheum(jD3xnk+9OI0gyA+~d!%0-I2ABUYLi
z4M+=x0n4McCbK(G`{JhAvlx8Dfn*01=+{Nq7k*5!Lalc`-5U5Se`hIDBZg78bhgZP
zzWk%I#a#Ssc^r1#?uSxBZYRp*LITD)CkZ80x@!pE8#+~}N!07JCU{vYkol0iy|rBf
zZr%||OrK$)+N&0J#J>1!KQL!JJ~ZFM5>}R~GwdBGm-BhSV9CLDJe$!$kWkLHnz-I)
zvCFb{NoxMk|k(Iaa9jejNsC7swFBrm`i=UBk<;x$y|A7
zvi@srPA1T9Tk3+8&bS+#>{%n7F=;cG3lDcMukwq9l0!Nx&s7AHx9k_ScOu3ose2*|
z2Hl9;;P9jv&S^}0!?c+pEkX0%MvF{sD>cJFMkH*bRdL*U>F^86yfa%pc>?V(dg&mk
zo<#OooJEH9frOTmjr5?R+3oy&td(e@W1L3p;p%2Eewv`p4y^zq-zF<90E(%}n3m#B
z9P9ULo-$Px-b`PaOu%h;4MNB@fGydmqFg#Ja}v|u@0d-oJorOL*
z85zj)lkXXbC&|*&|JuTozHdqP=6t{uxeP14CF#ZQV&Wbkk{!7f%_S>41wcN%^~lnF
z5wSQ!(??U=PdQ2VUe;@`w+DRNFqMsj4%g?ERNSs%SM6$>PW@WHN$bj|pN}BbgQadr
zUwdzcRH!R`e)o}MP{9>rj$GtOQ-{e~Q#q4HZ!u;}i+(rSyd~9CyCsT*5w?i2E^gXR
z|MfnDOYu~21v^L$5%uU)M`CTeuM?|c*UtIuP0^}6_vw7J8KhS`)tqnfT>EI~21yjg
zeOwY%)@hsR5mjIX@);kn6>*eyZ2-eYH5pcEx&e9nX#_6cXQRcDvG
z*GsW2`_C!HkHp6PRI$+kX6aoT;*HoQ$z`)!%!XsnbH0A3h6m6pHfaHUCf|s!xjP%-
zZe`=S27G)`u)5#!-tPz6{qbDAgXh>ZwvgVLC1?^EH-fFv#liT`dd*6V{+vY-PUyWqWpC0BduX%qb_=iT;({I^{y((STr
z=DBA6Vv^}RwXaPVZAC6DhD@r!+MsSf<=FWyol@y67=AW8T`(ZL_7f)0;4#${wM*O@yXY;I5Qi>ClP*!tUV-X|shUF1
zm{o*6TE+r>OT9R9_@geoDNX=V18HVIZEL275*VG@1>BJkAD4@z}kf~=p{erW$Kf3PgT7I
zFxlx3U#;
zic)&X#l~Pq1{&i)-xn++(I)y*q+mRzflwX09hehZ|*sH!s7Sz_2xw8m3l%x92Oj
zCMqh~e<4d)|z$B@j_k+14doAlG3*k{j^f@8J8S%
z$U7d0g;k*qOjCF4Zn`H^?|y~;)P>@m+X1)DjhdQKZ|9%P427cNjHwZF@N4G+zT`|!
z`4KCo7qHZ@L5<7a-a3sReJlqo=&lg|Eo~&Jl`Uw3;T$dz^HDrs)y#rcTukI^G0z4%
z8c^Se?Nz=ro3U|1G@#^jNQr0ZAwOS+j4T8`vDS$hEA~z7_ynktE+G2uEAaUuN8`17
zWyzkARrlwvf={sMzCBf{(lw(L`}iY1Ci;1O5{;;>64NP$WnkUKe76~L@A#8QIB+i5
z`nc*uxTJVLO-*^da@i7d?3ZMX-krmTYpcO!vK?_6j!PR>)
z8m4}?32k+?UuV^VycNZ{*fvFA#K1f@b8P0-bd;h*$U^FOGbwbORMxm-sa5;Z#2(Ys
zj!>gpZjEc%lOXFM!=PfQHq#T=*`WfVWjp_eBPfk08Ky3VEh-XU@4gW%7waFa6o8J~
zuDy;-UQ-x1oyb6QKb?Hd`FWktvz7c(nc^f&tZvA##v>=Z%ST#E=!;ZqNhRK0&MDSB
zqPoTiA`fW>nM@NX31RM1Cif4m#dPyM*x8Tdtyd&4p|3prKiV
zuEwUBmP5pBYgeGTzGS39W&PXnNGacbkNRDS6a|~xi`X`yg?r)CA8AP;hWM+mW9vC9m
zW29ocidy^jB%o;_FA(>6dgpwILQqzI1#tMz*YL3V!aVkc#_q0EUG6W)`IeBxwh{qb
zYt7wXaUZN=xX|#2N}YlXzGU4q&18?nj6=g$NI!AS`15~^4i$9U>4>y6c!QXkR7CC^
zdBgfdnsr*!N2Y*YjzQlqb%US&)NW-R!RpO!bU+_GY_o3s@pffJEm*50@;;}7X=3(D
z!F|&9LI0ev!-`RRsgC@z0x-j%`7Z&&?lAtiLq{xgLXFV#5?Nw0F^}(8T?=4{%(+X(
z7254=ANi7B`lz5^yxQs8ES0g3`f*ukkX*}d>8i#lfE~7L9U~4M##cUZ{W?oZk|}E
zkRFG#zf-}FEho4;-L_s>lL5a0{`;V@xH8$9?F2+y#J;Cqye+6CoMgTnrQg`MqVHX}
z?{LKXLmg2e)#7buk#-v^bjM^yq4$Ym7KWaaUPalnJHKbK2E%kLbbPN_=*}Ad6Fdql
zNKk>Mb#{R}nXPOFe(I_|rzCCtgtFI`(bK-h`Qx)qcb~P}&7BMolC!871kU$M*CYX;*n9|4Fu%XjS{}IU_URMV`!=IeF7S(huSYJkusv2k#Z6IX@%LJc9hBWDtSyPDih?I%-ND`GfxlYZfMP)372MOQF!(x
zntEK(6QF{W8lXWPn@6lfllGV|vQ#`Pdh3FCQ0U%#pW<{G>|TA;mFW5mTZ-%E2d2ZO
zs1Nm_;g4;(jUVv^z1Dh4w&$ofPkV@JLwv%sLbI}tY`=06R|lp7lejnvz~sSjqdfy3
z%~o|2gZO+IB8X+p(;lg4#HVR-u|}@32l)fU*H7~Dp82eN)M5*v3kk=we!_dJ0GDb4fn^QFEjAew*%9^tE;*w5GF>_(#dH=YSZn&Il)Q4C
z4=)hW+ZaiS2k@SzyH@dBR{46peOvfe`gT?m?z5p8$ujP!Q6cZc)cgz*_0hHB#-kEP
z4gi<1#*m)js~^Cx3A>Rv=jF^tE;NGnfk$f%swp+>0$bvJg-o+!l@>wqjMo%U9$;O6RZf
z(la?3YR6n?v!i+aHB5+E?}YeRErB!d24}O(~f4Wleos`>xN5p4Z#WaW?wuTtYSSmct`JfEOq%uR;@mX
z4E#=!i8sFlHRPP(f|(d**GP(4QE?hAQY|5{**b~bo>w@8m+rFlecD*AXVGWIH6BN$
zo1e**ikVgrj3Oh|usaSa)sjlnJAYQC4{V^rv!Iq}pbX%mTUBaFCTw-}BU>5)ZRAHx
z)JO~q^jSkm**lwAK6uiHMCaV(j|o2J>Au=0Qok@Irpse8h$7QFMI
zuyb>9>7^I0d0S?$WBv45f|5c>9>4R?{gE55wP_6@lRHXf!6zOmW(|+xVlZG#&4X>?
z<+rPXl`VQJg5|iYe{|YyaR&oSiUwbbJ*Wj{GW0!o@?``uGbn?VUa{`v3>FtqBs=%z
z&1DV%3d@e(HqGC(`5tOAdu&kE_Rg}l=v1-&!w;#$9MMFzU~{ioM*ol5SslDm6<^M$
z)w;r67LDE=cmfllTwgtR=h$;dOtB#vPwF|T^4}%riDV~^=GXRh-_rdKt;a&B_b#jg
zUzL6NHHhef`eBiluX5d=LKK!S(ZUoHjlr*7T)<9A{%Y)JJgt>Q>V{QjrH;(wtLWyP
zw(*YQggiJH9YK`z9f!N4yK@G5v&ZzFNhB+iH|wa|shw6^uW)1$X?8w0sW(zo{B)Oo;BO{Kok`wc`b2_{(3~u*=)s5FJr?vck-5L=iFJlP>bc6
zt@>H+IEZKCXm36&a`g6=k=@FwWZ|0~u;LDD2D8d!z#aKn?jMiNw-H4Z`eg1`z@c}8
zK}{3FLbdFH@zBiW+=<|>zG5HM4ml#u79^uxT%>N5<>zIOxQxxf@V6gw%u#nD@^@ZY
z?2vOJJX^4;T&4*FKoM_ohF_Q{g&`&_?{|(EJuWk$p$<*~&Q9*e
zQ~aPYvSsAqPu`7C3Z{u_6l3R|3eP90Eze6CX2UZgGkxL1Aqn4l>a=A;x_-tbyM^}mOffu)2R|>k$X~{KSP(e>$aVkdVaw1f~a>gMC~m_`Fxj;-{)`Np{!O5{_%+d
zOAmZ2e#Hs0R@pHYY+Uh~3O%={^R)_YRN%pnE6V96(xGO7-E1dj8b25l|K8z%be+E<
z`D|uj_cgxRT_;mx{%E5mAfP)rZHACLDqwb`Y(p(3ZC5hMdz%2jKtBFD1I$Lgn={+=
zzEuudZ;7mDS(rrVd?Y8Hv6h!frBzUj8NZwR6dx8MI7!nmYW2hJH{gS3X^k~PN@lXzOpAr4x;|a+!UChVn!+li4m$5ffyz^Ya_-USR81D|F(59aJHruZ
zUsByr#xY=~iQ8Bq7&Xc#9;IPBCfEn9?^3eg3|Ht!bl!bstQ{`BbKSEx85~-_yk{o;
z8{q!x32G5VJyfVs#-&mg75c(_7V|d#Ow6~e|08&^taEA2{c0-BJNUec1+&9+#1%>=
z7`sw3-1In+tWP_e2rr`e6JTMj^It
zX;vVAtlC;d&iC@`b%asVjw)C&
zpgn7rkrOQbZHR@CIo89%oCoC5JGYv;N)9QtWiq@jRfc&KlXTSsDbwoJ1y`g7s=Sr6
zg~YM8I(Imm;QDd_<@o)pCE0TVWR8?)a9mb|_S>Ei9TYj?{1DhZ@ebwu26#XS>l?
zGgHhrJ8kT#e
z!YQS7H`@~t-EuwEZ`i-gSH#pE*12|cwMoC9q{MpTfa&KA7{mz$Oa$rKonTddo)n+2
z=nqf}zII)iRAlaE8yox_)X|h^I+Z#m`fFZA+Pnqfd~+)a(|i7OzowOwF22nSWtNPJ
z_+B9ELw-qhwsuQ(Y+cKXTL--|vC-LTd8g>)#7TSz;_@FM+7*wr{)&A4k|jodq2>vt
z%+ifvPvuT`pZmUecl{$TWD!#RU9?3RoOTpSloX>l&zZZq
zCaq_#iKgzY)z}m6tQYy4z^`rrX&~^#*4seKeWuRF;@n_ok|$i_+Pq^Rup@NZA%Z(JrrW$r)D#
zN*KUW2suW@SW6u~0KP??gc~ul$X;e>eH~#>~v{4)@3>~%Z
zF#H1XP_Yt4u2@BX9_NY^b$an4Hs))1UCa4tyJ#72nN)5fyRQZ65NKKkhiL?eZB2!d
zjl+UvlHw(mq|Sb3mEE7}+*hYBiY#KZp5o@_C?L0jN#~YV2$97S7kM3t!m6s4B=iEj
z>M+68_iD+xYOLBz^1t5C!849D;&qLwODe6oi1C<&9$P$fI+8Ji>DqnHsdE?G
z79?`~iEiMwWs)?65MyhU!-R~Cs^=UWSDpCR#%BKSSn&$!cGp&|_In6ZQHDt^P*M=mwSYw88iElJm
zw?{FRbpbrhsQZn1ag^}WuPiMZa&B!ZlZ$e3wvkbVrRMcp?0(&stVSmTgsqOH^?YSY
zH0NHDdQ+&9UK(3x=^xK{{R%E{Z)@o*wanjHva%$>&*69?(N}egX*`)
z)5IGR3FAo8}kZ3bO8usL9T2%)Ce79}IYg&e64x6>5>+2%>F9$4bx^
z?kKGzYljmh?8>(ROt+4vKP*S{MluT;eAhRFPl;OE*l2Bi9FtsF#Vzfv<=wkS=EB!j
zu*T8Y+bj_~!z?PrvP$5tW$?^eedYcBm3!hDwEa0EhU-tck4L+{Nv`jSlHS7B-%l~(
zSz6_g%F4)%kkI862zczyO~KBal1a4Sl3skxwbgHJ%lf)3;y)DY8db!M(OX&Q_BtHK+C#G5cD|DC_-sd!eAJHIHUay%uSU~;8-CCF
zZLXXzyeWCA%PbH^ZuJin>K4m!bg?LutXh522Hhc3h&bE_*F9LiIuyy+{!k-T?5SjeTyZwJrV2K>k5W??OXoE7_xkAc_LgzJl
z8$SR-u+3rc-{F;nGNZh0Hie|>a%B`cMDkzQ9kZ0fvNB;JU`ut!op?qKTU#3Kta@b6
zcctckvuSs-%<|dDsps7^bCDd%TJ|YFc5uE}W^(EcXhEfFmKIlfnZ1Q{dr5TJjn$*2
zxP}?;Ue)Ee7Sc#Ayq!TDnBs}lEMa#5z*j8m=W$6(6)AHiE5;GEo%BvAS*_l?Gk;<4
z#x5yy!<|{TXxZ=gHgo4ihr;@1nc=-QRF+1XM`5MucH7h>
zSQ1QS+JFtfoSX|UgWnCVq74p%JhOR*S)rd?)O_VZ8XqMdH%Ju>g_KDzJu*QUKD*)n
z00L^hDbbQ^2YK~aHJQrT+)9b1+(C0{%?j9~+C-vAq-H3aQa0S6DDu{`d_AXYj+Qs`
zSsl-)!xfF4`Hkj#gECBQC$*T3w%hN}s~92*t15&eb5~qx=2(XnO0{PhUGplHU&9N#
zYaKR!GX0!kQk*I%MO5W?X<0^FH`?1jG5hSFYLMCtk**Dzb#YmYkJJV>@t+Z(;_sK~AZT*{_9b&=!WxOt_ECpgZo
z7ZXNw)l5|DN>0*)sZrV~CY-sXt*1e%Qs#<;6uFyGvQ0^=ts3*{bXO5xBgY!}TR?{1
zVj){geHz9kSgvjS=-EP9&L(we{?D|Glhh8*-(9`3ywq=R#l_9ep6;+%y{(kw
zt>`F>zalFbnpJK1HK*a79^1x#5wY;u)*egkI{NoozY$LZj}B@uOAFiivB;6h7V8PF
zW0hbl8$G)q8`GEUIbkTcLNSf3)NZBSzGvRnckAYLEHzgtMSMysNq230H@55aXC)Vl
z$cjR3V24c?6WX<`@K_X@=F#LwNQJl?0xT0BHMf>v?5GBFsQx3?Q&+Ku!&?lNRh`C&|rPG4@q$}jPk#;)byfeq1F2C@{iFGdzM{g6|Sx<9ot0n6j+h(~}
ziOsr1zFM>i96n1K%*@JLajehyO>Z>qN5Xo|y{?f2mm17hPYt_W3v$mqaY~UYT`)Fi
zma&5nc*4k7stl5Tk1pYSPG^9`W|pN-1%{{1Q8_18(&o1$99zHc+i&u@wB}Iy6`Tu;Z0ZVml}?joqGClZ*Czr%>)s{b9T~7ZNdgv!x-J%tL1^(L9X9N
zz026zOL=c~G`3eF811Bv8&!bCCA6}}mp?n)F>oX(B%kiGvIfm)c=yCp_={E1lU>tv
z=_V5S5dDN38#IFY8>nuCu-;ERQxp4=o?t8(VOSC~(aGU`9@gVdw($0$rs}d2?!3
z8mN1hxzgN1)4>4J^oQFkm5jkI1|JXLAG*{g(FDFD>JaL2teoAdY#WNf4{;mN}i7ogX9+
z10R=PR`Az|BGddmb9ZB@>Hh#~@Wz{I95*pfW}j-bj@6n+QVPr-+sO(r2##&Qax3Y3
zT_Z!Zhf|6CLt$-vR@Ps&YC0C0`c0zVLV{TC=GFBJL39h-$+At8$cG0F--gt@Ti^j@
zr|9QU@lm&wOL1#;G+rBnOOE!}RG&}2npo^0UEXZ>SyOE%l5Nr%HW%Dms)cHlVHD{#
z8c|T*PEA>KNjv(qYg-S6sZGWeX6Gw4%&EmE-u&{qeu~~_fp}j=*ED^0EkbE8WQHc1
zSA$5s8g0aqI+BrzuB`5&TXB%AjSFs2KqEP?a`2v|ZK>+fSYLl>MK#ODy0j5U(x2?v
zOflcZ8^Yq`5pVXz81oD7RU;si<(@A1fuZPLL->A6eO^YFWyEQFb0y4Rrcap*Xx8yY
zQb16YxSJ%540BlC1@S}?Yr5BnuI%3F^;I^r!DXpjT2E^Xwt*Au*OuEPwYpu`yJSOz
zb`C)c&!zpLIv85&G^Z%lgO4hcQ(ViI>zTBg=$@U<8aZ@vnA#Ni;H64j>B`)#L*HrI
z*G}5l`nTd|!ruY-yIHrqxSr2Pn^m;AYcCXPTE)GFr(+J1w&yyHiz0T+DNAX?V-Np6(uJd2EnJxCCbuH6Rr7VIG4A*NXo{_u^cX>~=;dsEV
z+I>Gn(7Z*gS=z~Xnw9p2rC(_gt>ibFjhyFD@`BGMf4zCW-ZM_;fZbK@ji_dI(4F4
zMRy$Sa~YYwYf#rL@xo;C<9)EG;c&Pl0CA35^K~U#5lvI07;|YD%ipwhl)d%UTSd^C
z9I#aA&Qqx>&NpsRw0-Y&FLk}2;&fW<_ji99Egs4%J82`-EnyMaTu*hU8*4~RcaLuJ
zsh7%@7;YR!`P;v4NExqH*1T=uYek>;zivs1j_3P27rGBB2^;+(lK?lBZz}^QRYpAl
zuPpIfu9xCX3s{O+66V$vm`1Tq(!H`YaWYScJe){H_SQ)Y5aV_YX?TZK@O_twbhWmE
z($wkN#-|Rdu<267Hl@A91sh}lPCViV%fC6#O6bGRH884a#yrkCOQqGc+q1usm2~OW
zsTAAglUix!ze^tR;mvvtLq*eW?88TEEwTs^)FUi$6A)HaVib=r7zVYk@5^brfX+);
zUM@bcNB|Y{&xHJOplLQ2nsoQl#RZ{g27%B_E6dV)OitAH@O7$YN?v5s;r1WiE_h|J1$030P0x)>z
z(xo@P_27DM#UBkVm8iMYC4zN@Ba%0>x@HX!2w0fmWy5XUInD^k2Cx4B!qo7dKjbRJ
z`hAM?KdrrAJtzMFFC+J&Z6)?!+0$W>#ud6
zRmb740xpB5+GtX$Md53Ek*aCelSdbcTWilh+N~|MeO$|QumZ}a+AW?+6C1cuTyMm$
z2n~C{m-k)})oyNdXR}>i^7i)LJwMOUw78r5TFoBb?q>5A;`dD|-@D!s<0`?5*G&Ha
z3VczbKBuU7Urp1@vs%k(W2Z@fZLJq!J>Zf%YpXaT({3ZUwDRNgiC9>TUWQa(X|KD+fK2!x;FO`+QQ>dODoSVYdE2pgr4oz(SA;%yc|X8
z!WGpt>Pm22w4&!xZFx$cGv$ib^7p*#cT>oCZoX+#8AAaG;vX}GTCFH5x3ZLURGn8O
znv$m$+ivXeoj<`IBJm%DAd61$&x=L%^cL27cs?D~6HU}EZQ3kJYp4*GF~Sljjg`V}
zh$|EFn)R;@co)Rp7Si#KEMrpG>;DTivrqB1D%B4142D
zfOj!v9`_H&9Zyy8#=E9NbEN4vqeQ*6(;JNNzOy+q)|}?J@&&mztdCZL5WjXwAyT!Nd7-i3S@E7aM`=oTW+$x)-YXo%!2b
zx6?%)S$*f)V6rMWDwQRgVjVhDrsW@Ip)}gn-g1oN@{E3Gp?GV+ehcx&sePiwp!mDP
zej}4oyN-EppnJU{%k1d!{gm6?JaNXh$Q@%vLhFo*v#jV^KACCbZ97e~(=}Z_%F=yW
z<6PeM8jZ!Con*4I-itvCNhW;Qn%ojLDCFX|G+89^Hmhv~mx}?_ba?NL^V!?oTE}sI
zlC-v05xl67%%Nmho-k2%!n=1flU)~xJZGqAvfF7^ei+cz^=O{Y#2zKqH25I0iW|u9
z7g0G53~%~U0T7g{-6xUEsdrtM(1wXliWCou_<
zFOeJOuVB#Z{use(@~)$8b9--X0$j8jz4zE23yGxrJgA~@(4xpDPbjlt!9YnMADliU
z{6X$YU9-XOJ7&XhS
zQ^Xeca9Z38ja-{OHZd`Fo?9tmXF&{7MqkT6GMi*$#dyZO;jK%;(%$%nT|oGs!-B1K4~Wv;Mz~ci~j%%YkF*YrTyAy>SUDL
z+TARYi9XSDasm(}7U?C#qBabiY;%pmy`#ienys#r;;lBwvq9o{Ke1m?vynxW&YcRR
z65LtkHcq}`M67U1#_#hDxfsfoSk650<&32sUR7)AqVCnTR$5u^$(0%yu6fRU^QN8K
zo90`|D?8pY*ITPt^m{w;ZGEWxQPVY8H0>8tO*-IO+pI$729oXzBld$0;%zd`IWjic
z7tED#Ijmn5`0L^Jlj3b}PSs}Dqqp(ZuBh5r{>XW(;*#zmG*c{bT)NBlO-ABY4Dd#~
z7v=;4TY>QZ0E+al3+a!h$En*x5UWAsq
zAB++y@BA?jiSD%cwF|8tbP(Cx!*?Wh@=VDxx<+PXw6{B*WFyN^Ml)YWJhPt%3|=D}
z95ZcBT&^joxY`d`%{qRKkVZ6&s#^URTmo@;GLher_Y^JEe3VwKcU!i_
znehdK$ERN0U8UUCS2jy3>E_T$cxJd_H^mc1iUTr%$p*L~N*E~NrAHHpsN?)cDM7pY
z*0PN#xkqNxpHh`+;;T*=suXF#rx+-~wPzK6ZfcDu(OMl5^=y77(fnJhcy~(IW*UW_
ztz?&QSln96Wq2&F?=9ZdE?pieZ5DUaVU?Aa_nYHoQNK0i7Mfm_55)ffh&C5q7q!zg
zO%GK2c7kQJwYOKZ7I!Ij>vtWzQb_~JD$1zuDGNTr+-AJG+r<}JM!BeXpHhnc^Te0B
zPL-&v)5g%k@OW+&vw#TYK4fwertiB=#H_^ppb?Ev4)~Kq@qdqEmr}X38r9aBX`tOh
zX=!@|O=ld^3B+P{bk|fH7bqDbtBC_ZcbLAE8a1TR*u_r
zHKkh*8uTc_tkkTcp$~EjE^ptId2HOI?$USjJ?F<-%UF0cX__vwmuUu_E#;-}?w$w#
z0EBl?j`Gs?e&!~3wpKx~q6nc}j%(%5haM4|R=w7IYMwMpE9=b*#CrSM*;!fJXcHS`
zov)#|j_IehxV32QrY>fWZZc0i*U;WN(!{V@vG-q%SbFySSeSr*y2q
zFCw*#7_MvDe9BbQYeb`EYc}q*Pvl-It5L(sH-wkJWhZ;y>D^l0QuR516nJC6{xh`k
z2iaq_yZDzjpQdUzI_0d|h_DwkSzBLhjuAU5UINQ)95KYo<-r3vI$y#cBTLaW
zEo)w|jKO0sh%7YQKMk2y{yPB;%&BvI1emqi3vEdziw@)hdyi`HcY{;mr;2a1{S4je
zTF;2IR-XR=`+nlr?N0>KUfWM)Zxm$~>EenJcCbdzC;Br08z5I#Yv8X3_^$QfJ}E{O
z8>b0g347LyO~o}QZd-cX%2{1JOfb0GllQ9oNlGerl9Sdg>ve0V?=4Rh_`9w6FUate
zULVx7eOmr)cEH@)XcIQje
zt?%^hatkXIl2^MhOE{J|wK%QPd1a9
z*OnSin`5O*szq@f#8>jYy{)ao*xUJgph=oZWnd+d_y)a0Pm{qqMweuKVw%*ql0)`r
zn&VK{HA@TWbo@hbwy!kpZl(#hMI=GiAlPI9SXObw%GXpBAwx?y`y&_S2ZE152(pg#?r-Dc$Xne^TK4wUaY5uYB7Xa~H3Pv`amgdc4LkxRN
zA1|5nZbb^qmGZ&EEhvK%1cMSm%6pIIPD)z)6+=P
z;FC|%wF3pLP<^xeKK(@L5c1=hU3B@p!ttW9kVxCkTRhhn;_W9*y1QE${2I2V`$)Ko
z*TfcDoc1?saTT4##l@0Y2_b^&)Z3e<^KN5l2uv6OJ5Q_KX-4EG?Nd{Ps5}18bLDx5
zAm!FH01=Pi*gdNv?@h6q{?R8lce2`Oy1$ima0HRt%Or8ykS8iUo2lTtkwzIyq8@lP
z&4`3?6r3tfq+*{p?&?&hE5}u>IYqVJE!p)()Ftd|Q>hrKIJKkM>$B@;v$dJ>M!Vsk
zhEFGg?Jm3ptZCZ3>#0~zHMYABpZ%$*Y1)cd!!6_9&kPGOjs}~|vk}J0JYkq^$?0`F
ztra!cuvkeU36P(rjk7}ZDH?C#9w68E%nypI8>77oa)P(ZcW|Pl(~|X)Nk;l
zlC_SCuAFgLnAE2j(wwEFAs=_Fd0SUj+jHgb9sbW6?yq}m2Z1!5F4EIYw+*UVPvTa&
zRn~8#hDjC+yQ^5*c*U|>@?(qx+PLos{?ES3LV2dQI03`9&y^j7nD+}#D
zdyPU%beru~aucduTdJePZdJn@v9$wO?81hS_|^VwTTA+yJ-)S*C+u^lws!dH?M47lbO^{8$EKt`E9pl|(tmZY
zYVfJY%D>s}IP`0cIzn^xq%!L99(DOTap&6F%yHW*3Pev{)m-xKzUBT#Jdx~=mZ$xW
zc*Gt#Q~v;6bW)l6g}rQc=~;9B$9kbsA@Pow-}TbQc=PObvvc&XuNL#f3&_?q;=FAV
z=+^;><2hv1v5$SVJ8HVZ+TLnc0MAz
z{gAv-1Z^PkSB3;gxo5d+>vR|+mLl1(K9%Mg{{XzqTUSnJn}*y930
zqj^J+Pt714^{>)@?VWt*8h(!;7{)bQkjI{QZ6qi-{{U;BYH-$dZ{yJxPKRx^>{6U@
zo@5b^C)Xd=>GNNz`=Tih7OX*xe#(TCSp+ZlN+e+3Ggd#9PMk%l93U-^ddx
zl_*$}pb=kr_+#OH{QehP3mr#Nx3Ik}Dn6SO@meHt6>~I7@v&wzd2$6o=Rff0BDzgi
z_G{4mPd}X_-foEb56`lhRMxw0o
zHD^w&Ic*z%%a-GJZC%oL*=~;fW+JsEhL#ePXG(3!%}IY6ns&3%d`img^*%*|!BJ1)
zEkjmI-68{^_;yR3HLfli@?A}kOC&aSyry2Tz~;dGLE)_!*#G$33Q-<3EO8BEA04
z)$L=9F1@E--I6SBp}Kp2GApH#P(wKLVePIW={QEz2zj}!`$NS5)e
zB3Zy@2`NAUR3%iCjMvo~M!yZMr}l=Wa}}MUMqTE)l@T@~<%weC3=l!d2Owt^^BnZ~
zNO@;0>
ztJ^3dyq#`tp!0V?@yj|hq@)O4Nd?qyJimZG8S5I?iS-W^c**q(tDP!%t~@u{`XxUqC-2FfCx8sHFzL907=^t#;tu_0r>lL{uklbEq
zaU?ePmk=o;duaUKM%=5AjPWb7n)9E9uc_X6XUAG$xw;pcJ(a{74a8}-;K6aF$o4Wr
zG=DUvJAttBEa@j^wgwoZwW1faL3y98T
zlj3Q~V(>VBtVT9o&ZjpoHoVSGa8rY7$u4AmZ2VL3o$rFYU#n=>+LW50lKbs)>T<(0
zGs$yst1ZW#k^m%~8gseVD)|nq17g0y_!;pU(#zq`io7D)EPBkh`VOmc;pDx%NR@Rh
zI@Fk+tO(z
zXu_zT3GGy88-R};D<9!bp>+q0w1Fko$Ro9u7X9cEC9A1ouYgf&JAtK)gNZ(
z&o>CZdo3iAdwy#}nMyFP&gs5J-?-aXUQc~DPhF3G@dll5;_Yj}aJ7{BPN92j_IEQ$
zE2+6i5)`$W;*poixe#o(c?*2&0yb;yFAsQARPfJ-^c@z@Ns`umP}r=Omve(_Z)9gK
z^Befxp=Xdp;Kc-uDUMXYJFDicX4Rqb4u`8l8Z?#{1gJ2q;aXydq{iT*F76KSeq4dp
zzPZvqD9x)wccEQgohtfkH##n>Zu8s071WSIvBc>F(X6-^GrVZ-slMDPKQPC{W3xJv
zsfBGns{77yyZ0?7xwPKV)#>PVLlE%MgyBg}RTR13TPC!>`J>K0CjQRa2aGR2wRN3W
zRj`jyxLd2;J4n>*Zfszj!*68wFx%YN$25~nnnk-bziCoeIFFuq?I+-`hx`g>w9u^l
zX|F)VhQ{$NrLK)M$s{vJaxI$n+{YlA-YFJFwvpA45CHWI1=sH5xQM-_#k8@*D?>7Qwu?4YvLdMYn(
z`eMDR7@7Ogbv>mxD5&zv@zb=`u9dX0(OxuaLWO#9<(+P6Hr1W4r@MB${l~|5o)+;2
ztEuT)llWstisMbWFBFsOmv=YcWVyMH;`ZGYV|!UX(vh@GXNo`ru^iT=uftCjrk8J}
zX;!)=g2fbQ`h~WUbtU`B9G+rLB&1^`g*zSCE-}4_HTDE?#UBwv9BbdoP^vzoj8wTi
zmBAm&^EKD?s-DeKOZRE>CBC+|Q;k!MXYE#%HsLqdYF2u7kE+{hTHc>x
zmQvY^f9y>f-q!NcD57^a2I+4aDPw^Vqw*y64V;tG+Wa>0B==f&p{Yfri=8gzF0}b(
zzmDqW+0+oq799y9*x-Q6y|c7(F<#sqf&T#OEYmqa!f62t
zs<>h{;dRLq8YOO>u6*Bf`#bnXe;42Q!VNk
zwjwzsv~&=(N};1DLt~R)LN1>X>o7U#wRVyC=byw>zhSjOx!K4Z6C;2>Fa|06O6kR_
zjX3S85``JYQJfT`Ez?P-@;qV>0t-86>}>TWSgc}#+THK1R`Pd-D5F$qBoZcDf~0}Y
z+@7_WdEtFx*-D9Y%iZU6M|74sXp%zP({2z2I1FWMmgEZdbhXm2H7naVt|j|rwXKcR
ziEVdr62}GGT*V6zujI2c*w2tdFxoc^5nnrg(9HTT{3G`##5dLwOAXX<%q;Auk(Mc-
z`9v!eDRJes!nVgHg0zeQ#c3JTl-E4@udBMf`tC}FI6^R|1qi_<6tqb#5=&%~(HOVa
zw>o>6wJk0slt$8dI%cbjum);4^xM1Z9X{y7VSf-s6{YME
zOmAVbm2B@WSQ&&7dBG)t?aP#jS%|>tUNxrpt6jXjw6=@HS|+L{o){0?w2O#!60*9X
z7VyNXvrIwxVo5$@g$BKr!&UK3yLfL()1dGzwwrkZYOvYsy6wiN;!BBVNt#yDt+co#
zX15XKd94J1uJiKdyCmsKDN~D$PPh14v)gy^TXQJ;NJ+W0;nLgQ>wP<4V!prNol92I
z<-$jL$0wI_5Qfw2SMGpYOazOj>UxCX9V9O77b7_yCVVTc>jv&?UyM54
zy}TDKZ6ptNE#``1c8D0Q)t2dUt&p(+o-xL2+I(B$ole?aOTi!8wyWZQh&LKWjp2Jc
zZ6fbOve3V`E>XLjFxXKl3TFiqLiURnx{L*+36c7?e{&Ng-uGNxzkNcS9|{ez})6^
zpNBf;k#8Vx75Khq6UjK1;z)&|Sy;&4Y%&QRLhmCp$g-h!IpVteZ9Zg{3x>V9n&Jyt
z?L+KTM;A|S@;j=iJF{gN7$<^3&3z9a!kDaW;J4GYtAH(|SmSHRMAMZ~fx2Xo4nX5O
zNx-c8uK;+KFvdHZOXSDOl1l7OGJfU6dldA+4OzLud%wXgzDRbG+w(qox$uscJ54(z
zR@Za0%Gaj5x+^r6>A~rXm|e0-{`cWj
zruI|(MXyv~rL0!SBZ3ryA-fA4Mm17SPXOQ&a!K{5Sz{;Xly)N|t~z4?f(PO2TC-Z(
zN%vmvCq0@3o32Ot#kfu1_rUsARqedi(Qk+w=`sL<*-|;mlZ>&rNaIijbD260Gef)8
zyS4*_i@_v54+O=#(->`_^zJDd^eFjJbC3?@0A~jyap$H^0L?V%)9l)9P!ofbeQI(K
z7y)NIfC$0nllv!8&Jxb-sBD6f7D3Y(AuvV&cI=P5bgfTJQf=+wj@Qg@FftIYEXtuvJMdk|QOgof9+ftstV0;gf9W$R
z1T2xjcCs9)-pS^v2cbp`I~w>(4~jbN*3I$3Exds+*3vwC<8Tj~6saab3xcc;E8Vo;
zi8>A4t)U4e-mt($CX*Y7ym^s6NoOR5f+?G5R*-CUE8u<&!se<5?e*4wz;g`*y#2a7P7^0b9-?etTBj@
zS)NOY*+-VJLK|~80di~QUyA<#v`2!xe3sq=@J0MO{j9pG%dTrN#=^y0N{&m*SS;E_
z4KthDcqIEk9#$g47Tu7#cZ25f?}*~M*EJuu>FsH#+Uq)GLDto5uPx=ak}GBn#^E$@
zvj7;3xFe-}h@TJdeiZoCJU=#>H`rm1$Q~h*##k-28^JV^2DfQR-s@|4k-NqFwJ`-G
zlUy;y(#68GX(a~~{wpa-Nws*c_uXr`^b0JngR6wEjHv6*@lTfSFmOqwDLb~Udp#D1
zOMUUK4~H^b=yrM}+Sa4y4=P*f<4agtXx4Atl#Iwg^LZ_ikZ>xWhkh=2lf|>$YCbBy
zd(W{naOoZ&vqW7!8Kk?@(m9qOzrDHHaXuxJa9y7wsm*j2J^|4+tz~ul%NR8)zwMhT
zV_U_NTXcg<)9y@`rU3p-@m*X!fRsv(LLu9ZbFAMD>{C>WPFdm6ZNA$T#og_@8LpB9
zwoUWE{|_z
zBpQ9){mvP$V_2=kNja83oym2RIly7hrz)7~!)nRLBniMa}{0;E;O>Jw&IwpjILfYl^zL265
z7?`#6i5iH^q(&qxKvLu`>}2A;^8WyWi+FYm9~gKCUxZA*+MXV`)1EcASnN~#8ujH<
z45~kQD}QXUkq;^Y9Q@Vsf5B}WYkw2GFQ<{XJ>A^cFrCE@3AbBjqBxDD18-Gl}*Q4!nJ_
zK*Yso_IqP>b>!Ma(y}eT+GGhFZEy^vd3$!5GQ=KpUrP8x!HSlxHIufZqiUARJaRS_
zCbVKiQ|$!#8jhbimfGSmmNCBg(ZFRtnr1Wh6)JMAAyQna>+Y$1H(FlCN~4S-p6mL5
ze?zG7PmEUUOuBnbE~Sx7LjX4rS86{x^?N*oas`RE2fp!`|qn(-=f&@h4EL6h`{)Bs;lJf
zV}8n`*n?m*_!^vRpAha87(5SW3t(*fe3z5+=zOCX&)16C_=WLH!`~0zPq4o&b*<{;
zoj*)~{_&y*VYr;&`B88AS3JnTh3H6cg1#f!c&_Dal*BG1w0p=VvXjiUc;j%d9(OYr
zLQY%m@OpP{tr)c*VFcg!-oH6m=~Sx9E^2adU*Z1%E0m$X__TJsz8diQ##jxOlZ#7*
z=Y}nHcI&ibkO4l`t!p>LIb~?CL>96*-4KfQB#tr947Nz2^4{3_asp^)tnO6tQ
zx?5%eNLItMDMAUxK?EM1DpPM|aoE*1cGmH^d7U9&nTen5glDelW7ydCXTYzL{{U%^
z73q3qmxXmLCsnk$xYF-+81Iq`*mWj?8@ON*i-dDHi5;MI%!!60*0sD{{{RZ7#Fs`S
z@rCuJ^O9qlO=ja$u=5)~GqvcIF`mhB7_TVRegW&+yzBjw;(rfb+ba37TuRqg*GL}(
zFp3zi0s;!0iF?;fV?q{{O0tBzI5qg{nVa^FwHPM5sV~U+>rVKQ;e|1+?u(~fv~zju
zb8ckQ?>xdu`LAsx5(QQ}l%zRQ20B+)u6Vw04a@NJ#u{`{+_ZiIy0(obk+8-)?QYyT
zkf|l&ONd++Q<8%syT|>JHBClL-s^rJ-sk3Jvbt#rC!Ea~x0#1u`Dw*^Y4|na4-I%*
zL(x1_9hZtVojTi4)4VrelHY0YJXaGn#FJdV_KLJtmhw&>Ynz6WBvJqcnNZWIhq_Uz
z%-@s$03oPVwz0Lm(eYn`z9MTn{ET#SN@a
zBV;o&AYNWgtJqiU9+~2eH&?QUP}3c*Ev8;_-c->`fxZif7?*B}+eW*Z=fhgwhAyNDsM?F|IsV5!rE7N-@Zah7@lEDMYid>D-8qq_-i~5b2wSlD
z_2Mmmz&;=U0EDjHG}OM-CmMCj>9Jp0EWkv~6dHu-daHVyQOXyABt?ytBv8a0q~lhk
zDbswdwy*f?Y}M~6Zqn1`uiv@#>{`{FtW?gbK^uToVh^G6%MgB`Rn@z-l;Lg>&M*`{
z)AZYre@gtZ@K46O?P4h20vPI#6{ZB{;=h>1S`frbAg-!;)esLeZw{>T}KB$(U)Fga{EeFTVBR|w#Z@fzQ!5B8Lq3$2a>Dx;|JfGAz4&S!hF`>y7gPHn579*
zw>)`ow`b_qkK}xns`yvpW|1x}{4sy7Y0;+Un@HE}Uf`XQGe(wImv+qHv~eKA{N7^{
z207>N*TqVk?fe_7S^(Q5PY`%=8yKCqEbVm^&ngYP5|KCJRTvx;fcjIY(+>(g6)1USXo-s@eu`riKlg^$e-i@y{!?*Vvn{w(fRasqqI`e-n6*EMdXBB)y&1HfZ(6@aNUe1-9+!6rjv*W=9-xX=SmIOxeANUD
z1G^com2_{}8{=-A_6vSIKWP-ts9w*dL#Q?M&NP&cYk23D&dzK@ad2Ucj8d64GXR0G
z4{bV>X{brni*ns;)9mkWyuwkbIW(Z`w)68iT{prWA&sH6hs64YmXC7Fe-u&89o^E~
z>Ij!bUr~(&oBYd@8JJO}V$(~|nmFul;yk3GL((UTRTWTnLgM
zIRTU-OviX;R+DJSITgZqR@YM0uXMlcYdEA!dn*TcFWnLpwT)4NOmeu5k1UY|&p?A1
zuh72^_`ZAXJ_~(j-%2+nRUSJQnrkvh+BW^Eck{T65%|^*3sV@}8pk3=j)Z_(xGxv@+TL%E9whMgsjRjC0D`YH+i$UGP|DGlxxCV>67AraL{PMI
z+RHklK2}6xtCL@HYd#v#HRTuS;p=48AXwoVowPASbdX24Y;u@bOFgh${_KX^*bcSx
zSHw?(UKF|UW{=~W8|KnAPY#U+DYP4OxBCs%gI!%g6NMosHxfxH-9GuZBqwrnn&qvB
zh7L6D>UUkyr}T)m{^>RqIq8*7tIAGtoi%dv~*
z-uPnB#T}AbX}6QyO=)8)q{UdGD!WSRUP6Ee3|Np(I$&2MMAveK3?AZXHv;Yxe2#d_
zvBH*P)SPq4uR{2bBp?O@4<}XEzLGr-f$o
z5W{_AIxuMFgvdXFQ5!UE_k!cT57v5iy)KVu43OVVf24@v0j|86S_q$LQj^At@<3FK
zJC;0h=Dr5iZ>7~5+}tMXxNYN#dzCWpF+u{u7#RpNYyonibHO0CO?uUz!haa(egKQa
z)*9BmZ>Y37#pbJ|S*lAtrkQI5w@b1Mo1Z4`>EMZRY|(?}Nw=u4Sv_=DUy(
zEA7$0Ep+uA`)lf69PuB4JSC-G{g!<~ZxnsMO+*;Fj?T|Zy*Hvcle}
zhIZG+KeN|@ygR0RM@?H^@ja%A4wGSbai`vDlf`3V_t$VWr;#(dw*19;F;!BhDi1ip
z>O5?gx|WH1;wz}WA>T=36J9={;!QPT(yT2lZEoUQdpm78%38@R*HNhxAYHqen|3RX
z_))CLKe2pUaWJyg>|O{ns86FrFXB{l4EHh15Rf`~k%)Z4vO6g2iit{2OO;Ag
z+V_%L`ThhYhm8u3Njk88@AEY5m6i3e>32RQ&;|9-wbyjHBZlHwm5!lo`SRVD=LLpX
z<0mRUmGmFOzl=T>veW*};vW=vddbDXDJ{mSXeYUOKvF~qUPqO%6tIRs$C5d(&X0_q
z3)1wTikgRpd>?4$&gryR^s5As=8#XQT3aeYB$8(`JhMLk0GY@QApqcJzP0#Y`wMu^
zIjuE+6nMkKIz(|vZ+dO4t!{Ozhzu^11s4wXmgy)VL1cMj&vqHF4z~}`$3{+gsm3zA
z@5-*Pt+#%UMsiD4%Hd?4xxD>seGjPWx@NuO?+*BjMoDDRwG9H{2Cg%*E|)&258dEuQJ!S3}bZ9^SO%H9Davt7jy$h)?V
z+_LOrB+IkzL#!=ld`gHoHXGqK47YHI+P}#
zH!i!b>Dx(KUn3{>mAr!E###rDbX)s9d&2%D(dNVhYwopHPiyUeCVko9Z-^sTn^Cm7x&F|<6Umf)WpErl
zwB?wYnK@?195XXBGcz-@6Eiz;%#34Zj+vR6<1sTkZ@#VfW2^Sps-~)|Mm@Sxw??hL
z_nvcjqsEHZ^fw|lZ-$PO&dWuiT-V3j)pHrEd!Ix1429L|=DsX5OSM<(js%z4Ix`Fi
zJTAyn(^FPX@bnkNZ0iG4gfEEBI$bL!tP(3mI6)+0it#sM
z)@?t$F3AyeZW07mMsm5AOP)ik4D2&AIeLCqFh}MZlujtJEi-DxNom`p@MJiIk0oW_
zX^W%rJuvL$1X~<{8G{;9)w&BejpB`-UhuU%_Qt+@)Dlb?A%Qoqsi!5?L4$_xqAhjZ
zp@fbsx6=kpNgF#9OD2qi2`1YL$#Gces()o_CI11W+vD*oyPRVFu5IG8j;A{uS`a_h
zjyp%>QnuenwS!*`QD49aJHPZ_Ek7x_CNW;>U&b4~V$i2okSw#;nQx|GnY3Z8jWdF$5)p}aB
zrwVa&Ji}Dgj9hZ%2%kI1?zj{0?@9UK%XGD)@?R?XYfF~|;4XR8{R5cZOdb8qL}BvE
zvLC@}8~#SO$lb#S%i~-UD9c>a6|##%9r$;cJ$Q=>4#dc=$h*Rf?*+!xWcVTrvPYPJ
zeg5v+467N{C!U}9{{Tr+oT(l0c~$=a95WtYkvC6q_t!rkN7NDJ2{xqU?aRzyk;a`Y
zbO9r3orzu*;O*hw;o{XpktND@Q8`z8a;!l!KPb9gKQ+7TV2NtI|
zI!F{Z=KWmU#t+wgmaf`d|ATuXxyX587*PHvQ*qw*mJx=G{d+Tauw(OmUhZGiY1c*9
zmA^hXgFZzgq)_u0wGIKA6{pC^V*Fbak2{+kGB^?LIuZ>s{prbdr*XvhHT3RpG99k&
z*@ow-!AadqHvwSfrY$PHj_b=B;(A_9TO3QVAhpgYS!i^h+V)QvpVW&U=DAuZC8{^s
z<`fz!2f_8&0QSlg=&2NCk+o^YkpL?J_5kaB|10EmdD!?$kh_6M-r62>S01~RC^Bk<
zh_GLf0;1Rw8TDH5S94K(?l7i9c)vU-FGlPK?#SGAv5a
zezD?*YnfVhg&cdfp6rfSb4Sa>B9(XwLs3>XmeNG?+t$_JK!S89DUkybk=K)gd`ylz
zh?mcF-hSb4!WqGkd}`=L)YiF&)-T9;(_Jn+SNPAvCw=Q2&l${ohH~qFK$CV!VxUx(
zbk5OCbt(M#TS+2kiO(+pQV!!xVJrdNY8zv8U=X&Z{7j1`R=7+{_+%57lA*g6>0d8;?tqoR`(TF~E%B3m&sl$Ky#KC#xq$rM+mH0DF((LB3Mz>7EyDGSAl2jsF)8jk
zoY%4IwC6`-*uPKP0-5KnnUD1bdNzsdI|jOX<4}K`YQ^Y%x4(!VmJgaonXL^LUq63e
zD&bj_duyJuo>$#bN;D_TsE||5rjb+jw4RvkS)^&{R$siPo(mSD!`_3eJiFoH1oEAFZ#+Yg_n~bFp%54M~3yuiH2mu
zZs=pJcvoATd4974!U#vE242b(arui4_TIJj+!Xvped3v@;A>dMLdoM
z*6ddR#74J&Pv>4D`o`%e!A(n~#S<>B9!iWIGrX?R_VKc;{1_+Pu?>L{_tz)|T(r=Y
z)j#8IrMb7V#*>%jMV^5yffU!h2Q*R5L9bt`+!1$ZF1BiNR!yinLCI{>2!`vtv?5M
zB>Btt?O!@)9WQeUE(^cmjtcA{*R~dattf9rsXABW%G*0Q~wkbHypLI=dt>
z2m;wKAVv3W+C{3^AtUG|u8q~|;62*e=0KXB6`t1C#@ua3q1Q>ylq5{c0Xlav^~
z8>5ZATJ&eMKJ_hGfxGAMK@48`Ky|tA2DZ!Nn#qyHshmOfExy_1_49dLjDlBFs|c(v-Wj@_4z_xz2U+1~@-lk7#Z
zeq52r{6nD2v_-5B6P!d~o_(YL?UVI}t1Q$LsvLyj6ZHw!p_&)O`m@aFPPh%iq{dPV
z4oGb9F^eagY;B&FyCDB+EXL+$$EMwWG^Hfxp8D)*Yj4rIVF?Oye3yM5s(C&cxY%2q
zcewwX`?-7zR*tmKNMRKHvWo03Sy$kTGZt`L*p)c29(eRwV-D?XUIN0LR3J=iG@!E|
z;?IYsN{^CGqKt-f62ZrL92vsr%!7M&6ZAUU3y!wx?CO3amJcqe$r-d|a;Drd)E}^E
zq-nR%iOyrv-~
zt`U9(Ew$!IR$Ml=+F{_A;@UI3ufHZPm*lh^~w!9(m#=Yr6T>l#>)k+FKou}*-q5i}V$`mLD>iln6
z0^K*##}X<<1(v4r`-#{-zwT=wUq7|5bjltjQb430pVq7MU!*uH2_`J$pljQAh;qip
z%A{%pxGXAJ?8nAG-Ik1J?->`W^kEoSowBnjTi*oL=I-lXgNvTCboW|yn(ZIy38fO8
zEh*l6(=HV^413g5q>Y;IYA?!eAa~zcX7BOubswm^Zb+)m^!~;x5PGjs=w=6!~U(
zcWBtAYtg(Ejj;f(FW?SLXFWdn{Hyd|Qr#hV(t!t%g4KwbI)@!EdgO?eUumh$x?48(
zjRz8O2a5F0_-em5HHG^J77QIHTDl9a#TVPhb|M(=liJ}LE)Dw>XzW{Uv-xdv`d!0lD<=(@_AU^
zsLeIK_I@<~p|5gSI}3um{u*`8^&B8yIli35l1^5>q|K^yh6F#|(MQk?ruBeLUFgyTK7Fe`57)0E}TAEYlobh-;`Gz_1xJF%X
zBbs@UDD-zPMml~e45FEgh+aGs+uh;&hS38k;(yiy{FWwLf@l|-
zP95)m+&@od;$a5NW9Jf|+jF=LUL_UP@{^U<{&Z~7mHW*SA{y})hv3wg1eTqu5cJpn
z>#KxDq>jc=i^jWHRb0u>@~Gl0jsIekGt)HIIE~(SxWwucXZ`3Uq*PUTq*PqOY)oA-
zj!{uY%UpPV6^PaxMCMcK|I|4LbkaMmq0&7E((sr18*UKt;-}~oVKd0mB1_vnsDM6{
zhEL$$;or6xkMhEmX=c8&z+O+0=1|8KhbZXJArQa!aV94Y_xxUm1HNN~6%pt1(sJGE
zPQgS>o~gwPTY3`6@zc;VJ32BwEwtbARyV9auY4b`66?XjfY(q9b2YdG*$$PDqk=A3
z=u01&8U-xS2NEx@gK-3cG^ySBFRw-fc2WIy@6R;5w(?CiuHD>3dGTqfv(XgX4Ltt=
z;_pX2e>TZb;(1?CtCG?8$&_YaXn0AQ5g{Xt?vg6{Jjv_-^(C%3wO_s1qWI;e#fwL6
zft<9)A7XjJJ8c)S?sjYLxb_JPP1fJ{nQlZPg^)!f|D6U4j&;iZ>u=~zR08ufI<(ff
zn;D1YLiq%~yfWUW(URg;(dyvcY_gcEk*Fp?b-f(!SR!5OAI3~624pGl1L1SW9_rH~
zTrV}iZ3B>B&S!xqqrA7j%ai)}VWzDW#Sfuu(wzEGH(-@;*~)UR%Bq
zC?PuPA*_FTb@v>I5AdOFl=+{|24?jiSS5+2I{wU=dx@EA!^o)?_JL$O;Cj>^5XO*o
zC$ya?H^7ZPLLznjQ;7wO6eSIKX26}vrO0QV;)#kq0WVHutG9v7fSE#Z}W3gIFF>LEtGO@+nA5dRy
z9#p~J`+Eo--tFaLZVHULO?Sd-BV|YQX}yI0f{$c
z@9mzj0(QPj_`r>zh>Z*iW0A>SpI?K#WgfjU6K>O+VqOX0+Y;o%o2b#rHInZl)(52TgiBNDKr*`mIBJ-7v7ot+-tQ`H41R-?9=aze>K07G3241^==d%4Cs{%rv|zTV3Z7c
zJKDm(7z7u$JC@>%GU*VbF1>=GCc4X51A3MtG0nPH!2E{WW~+|QH_=3uW@k!O1TEoq
zMyPM&GE6=^ePn!z;Kb17Hxq!5XmS{kGSwjjnHZ|uf|!&Yyq*b{f(Wg{u4;j}crD*v
zJ>wfNI;T~k;JnOpsvDSMN_-DJAiFw}D{oi}COj2egMf@P&An_BYi|fk-Ptu|zi$Oe
z-}VKYU}ZdNgo9xsR>(S}O#0pH;jZv_PJF0BW9~krhqJaWmoimz>89OL+Hm`$@Y(k;
z+&G%dq)c-k#z!}P(yO9VBqC0WjM0d=r?gGGZNb47rO++aO@9X}J*CJTK)G2oawGvD
zL|J4;sPDNp3*f6ZoKAlG%9h}GZD(wK7Rs|)F`Z9W%adGcE;e!l4C2}W^e4&TV?eSI
zZiGr7t5<9@Jywq61m!GD^i3
zMFjRG-9gqf>z3)=*-=Cu7$d}PmHDS3@o}#!E4f&U$F1`
z&C0VUR^5$3XRZ=h&M@i(Facyo3{ES=Hz^F$(f-{|jJTEKkqfLnW{y9>m@M~(!T}kt
zj+IEhRtP8Vt}gvQ3nN(|RFmMgrWwKQ@6EjXf$|=Se*Xi=*Ph)KJvRIUr1aMWuQUa}
z^4a~(Z93fUD2i$fh&NV5qi?>B%x&Fc%)_#4{ifZh4^q*ksh;OeJW>23PUlKNj!Wd8
zU|$_Q)Uk}cis_QykDZv?M{)WG&}y2W&Sv)LocKW|
zYuvO+Q;uR5SI1tKRU7t;A23?${oG#-3L<`Q;gDC
zq=dov2_yhOU9Lulr2Y+s9OwGuHI53mfxeieP6>hj9~l7ThTR8UZ*KhW%2%l;cr)lg7cLgr4*q@
zU6zY7aomPP*GWjAqSrUFn^Uw8|FX{*x#O?Ghn5SyXDh1U!6_cOXg|}kR9X4of5QCr
z)gCJ2HeyZ`Yl7TSte?FnxVSiY(KLEeZ0h(8aDhlM3U4UC&^JO|W+`aWNjIlwT3GLB
zxKu|sPe7A`aqq09U9Rh44<3^Qi!9cA|Jix9tgCpZ{VXJV{RDqAyWYJC6|7#|!RHz*
zNOrvOl1pO$VO93L14=CTB=q^MZhx@5oWvdbQBZ2{mDF-<&)9Zp(VFgv;JhWyK1wj_
z+U~}dniqJJ(7!jO@M1GE(Q2BU&D!=I63vVw>m_zC^F5LNAHZnY_|>(xOu+IVfO5O+
zAHbmIAK<=}@L)s?y+l8Sfx)GMURUqdby<08%`m6QpoGCm>4K-^Z}EALbnUCcY)Eapj1%S+D#O_`Zd`f)(VF*&t^7gRRO>E$R@=J`
zCQ^}I?j_r&pZ5JUE2E`B7-s90DVG}a((CPdx~69$L1HxI{_u?(J2r)m4ijseQSRWo
z+3xvz-P2j2|2+Q+;*Y|tIgHae%&=OtY+)J^=D;>MSef{hg6^yc|!pTvBcAx%wJIVs>(op_N?BDvAp{aA5mBfj})Lj|19p(sf_4
zQC;u$6)|Dw-B{t;^ZEz4#clZqsOtky${PDE^S{DdWTWcgj^kJMK~h{y?aU`H*=6xe
zW=jHW`y}kUy~*CiMiP^4+a*P2)d=`BI*iD#+Q%q49VIpUc-CYNJq$$nB88-QP@zfs
zpy6J%cW_*UJV*bte?|C}x$5?I=Tq?ZI%mI=RyZ+wh`C$rh2U*fj$
zli8P4`jGag8O)A||IhlE3Z+OQGSo8-dm0rP5_D&pohr
z|9-*vx#ALdl;W$cypTdgg?D?vJvXX~aQO8XQ~ZOu0Z!|?-|5aH-b!yqs^QF#@C$z4
zXP3v$2US>)KjzUzrSz}cs!osY^oyF>=J6n?28pJnX2}%AvzFP7
z6qLpy0a||``*e@#x*OX}jh_WWtuu6WQ2?;=Bveyg^`Cq)snse$Tl~6l}Gt@5zj99U~L4$x6%WNj9&X
zAe5vz%4uF0BK3l68#7ID6X_e$U*Fn
z-Fd;jH-3Y*-5IN@>H$Y{6rVo)rv3r)ha_rjzpB%5SE}2tReYTymEWtyJoC!TC7WGP
zX-|~_=LE;;Ofkx1%L*<^DL3NL)ab+k{~}x4oX+!w+^Arf|v^
z4w0R&$4}?s?nla~q7?gTQn}7>9f2W2aYEq8QXLTd=q_PfalcXjlps+)13yp7KE&E=
zj5lcJcd7&x`kN!Fs4u9mf`xSLg8!}k+WY5C7lie+wcp5&{At0z3jf
z1wJ7O2^kg{91<=*B_=*PCKe_H3=9lB96TBV0vaYdIy&b6clp-`z<`I}MC*fspaei-
zKtN$Y{2K(20000u2nY!9;eQJRBos6ZEF1tH0bFf}4uFJ!g8I)m3=Aw36o3d^4S>dg
z!K7e=#iA5dg~MhybrlOuE~uwc>z})Z$Kf#BqZW4yOSw@mY`~@AH2;Gqq2V5$I&Z-x
z=@C)Xxc@~nZ2+HpLCW$L+&nP^019mU|F-|X=HbD$!WaNZNC;>MC`f2%7-&ccNH_ol
zBnA{T*tRGPmMWzwHoI#mY;wUh4wYDaKZn}fp4kl??tj*4IMwm~tpkvuAiyJq!T<;Z
zI^GJ$Vehv08os%PzthJreZ;p0;=7`ZA-z$e#ht{F=?U9-5^*kEI)QP$UI~IXX_>sP
zvnHNgP-`SgBzEwYSu`9HlfXmMx?;!Il{I>I>1k=Qh4yqZyDTSu(SnH(g;KUKVb`3o
ze}Ji%<@nk`WER&9%RVUb!~0WBG)I%6;I)nVz66lY*jp;uFz^YBB1LwS^i(WzM4^Q(
zG1vCM-d)n78NNwn#**3NuT$v~$w|hULklAm8$Eg4?WiC6YQ^R7I)a1gQ_?)Hl0+rc
zyJBOI(DG&wj-Uw;ioZ5s^6(?G*HTaq*a+;TO<~1^K3HMkhoMbu%*aqMk;Xnwfl%_FE?E+ySqZ-gk3ThVz%DPnc3~
z%ddSQCcZnulbB3G^MT!#W|>eTZ||)=z=IHrUBUUIppv}Lp!b6AhQJfOBeF|T2fCK>
z#xGbU&HlF`-QDG8!5ZuBKl48f70+8XSYvop{vu}39-T-CGuS5|Im+)kuU}5-c5%;6
zxJhJwqbEwHk_hE%c_71Fv}jQWZ*bp{mm7y8@>6(|0C2=*>O;cw{1nWG!zWt4d7WOnWmg2XMk_lGb^O#PPg7421Eqc8A|%o3s!MOn+?~m9ima
zr;gp2li1hr%Own1#_u0O^Am?Z@El~NcP_nUK3PQJ<`Mm3?i(~;7H@@2O}0+i>FP@3
zG}X<;U!`~;F&geB;Te^R8
zTy9dXw(%IlzXua~mY!aB@W=;U^43#T9{s!Qh
zz`@o{TQWr58HfPshMr?_hXI0bmaY5YuWov
zRwA#Ys>ptDKV8n;eJj2_>R5briT&$D%2rk9ApcGHRBMZp_mUEUU6xoOKR+)1vg{(o
z*C?&KLL$yTc5-=KX9fv4KTw4Eu{v^Hq04;SuEAY<=@3_p7kG&g5O#l8IYPj)#%mpH
ziloS@8{AZOanlD@gv<^|x23e~u(%pj6>%GLkT+fRV3JnN4&owT<`^8>K3>=2y^)7U
znJL&AQrmU3S85a7qpb1T7cEh?vj+z6|v`wc8edFf}^Ac
zw^t+m?^W8F;V=n1R1;P020sTVAqIpi?lzP!AR{4^p(F66IGv9FrX
zzMlpxDz%t@_nsf!?Ve=<
z&)9MOZwhQ%mK;YCD*Wg)n(D(nr>kMEl1Sg&DZ4FLElh}3V@7ch`Gey`WQ3q#idtud
zaUQ_Ju5n?40tvfb9EWuTPC<9B>dIMoVu#OzLY;fc_j?wtytPNy)!!N;z8UkV_}SXY
zuFP*uMw
zN_W_xs;MO5R@mQr;-TC>VIB0Jv#V2Z1#
z!$j-kTN$)CP8L5k3%p`w^j9UMyjvOe2ET{SkQ%%MQz+a!UrwEY-}jtg1Kh+1N4ZFct5<%=IFiS
zFRyRqq73O;{-(r4MHM8>H3V@(PWKm@m)o(*ZX#Sju*`cQuk)1Z;-5TrzQ*|loJ}r)o-90GtPR-aWqvlbwL6M^^$zbwl*p8IA2eLIcKzOp
zG91hxZb<0+O?!D?+UT7_ud_i4NQ4i^?*e~%R}LJrD_p(
zb+hEC
zkxr4g^@6yc8uiUNuPzyF&1ps+3o_+vr!sl+uT4}$dgQXnyBE#znYC9+EX(TQ6a}Az
zBYDaU5Y%zUN<-=G`q*)o4f918yC@{xpXl*0{XwOj<=doO@{POx404feV_%2wL
zaQ>W?x3NT6c5sB++|`(^7B=5U#NKeaVLKP4#B%?YF6M+_lzefAah)On~;R$E}o
zZ*0@(&@YP{a6>heHx0W~Mq;JRv%B8dES@W{+1g<{pn@;!D}HI_JD=_#*4>cZeCtlz
zx(^xjNtZ=N>bQ0h=(e>vNK|KTC!siYLY%K3O7s@&YgETrM2fbg(-J2j{lW~^oU773
zH|**garNXt7jU`9fAv#ar@6J`D_A=3?ezb-ItwCj^T?ezA@QUR%9>3>{3_BV!I%
z)gN*U7u-q{=j8+jm%b`x%C*W?=9cs9Fk_4>zaq
z6)Fp_(olWx0ASH@*d8Eed?0NyUDM#`5Vl%1JvUAv%@0-~ao3Yjk>W(vhf==BDodOE
ztxn*31V)D1DrwQ{acs$sD7B0@e&^KIy4;I5!ES|M01wb0>)fZI+TGSzYHmT!Wd~E!
z_jJAuk%3+Xj@;M!M4EU-&Gz18f5jR?yd?n!mJVHwuDwL2Fj+($uwoU~i4FI~`oi!*
zU)Q{?k*x_15i~KLqG6Fb&7siAbpZjNyP`=2?J#K$Wu734Q@N!_osWRc9)^mh5EsOi
zb2@~?h~`Q+2MUXzh#}>lRCD(e_0?
z6n&%VQoU_Ad%w|;R)SK@L>KC<}VV%J00JyBQ*?SVXH?^nO
z*_J71ia*gjnYPoH=Pg^ko_2;sjZYJ{%uf-v>P}bUXCd}que%)Gw3JnB-+!%G#ksG6
z*zL3)PnnyW_TiBus!M(|(jmKThAVexfYvXzeh<0x)f6uuud)NzaEVjIQXeV|o!6$)
zuUxPACo^W5WGRJa2hC}QeNTjc6#IWg*~*SqZcBpZed0~mpi1Z@dSA*#lNmy|Olfep
zKjK*=2}r{7XLA&gceN#Nk5^qC;fLw(KcSi~@u#)Nd*VRq?8HFVN-~hjHxQ*GFTLe;
zzS5!bCOBWjh&B)qxHyUAXT&wTzjUpQ_p7{}#3O3xVKrd^eXd&`3dt
z@^>W_w3{^(NyEj~;zh~Ol~(4?*08PGtw0~_BiroLZ!6r{$}6h`HIouH7ckh)nM^=KQ*b=UK|_o+9l3h?VC#y
z_5&N`(B0~(khM?NN8q>*mhvQH=4BskwYrPLrGDKDS|CuSNmILrAm~`(qpm(Hih}ea
zFce8+NsgL_GhMc>89OP3p1g*C`DmRzYvkGROMe`u$aPSodoN$F(@d8Fe$K!KxEXIzQJ5sfd_OLOUWp@3T!<5kXSEze(XX@k*
zjylRLOxBpO)ix{;V+Kwx$i&4$Cp%f`9ExcpcR}@4B+S^ke6ByfiaHQqG_CE*EWJ{U
z^sQDf^Q(09E?R!HFy~@Jio6xm!1fOW#Jjii^vElFLyLo44Rmbx_dIQ{9IhK!@+Dfv
zJ9$?JExx!uBVPD7)u7b3+SD99g>)}<435r
zE%7b(3>N7>h-i~b>|)`n-OUXhnEH+q395gjq5(%Y3opw{j%CvBYa<J
z5%HX`Mz0Z@wxS_@PPxcHe}1XzSk9}C7c>dcrzuXMn!rYcl5*dhz{+b1Cd={iKfPwJ
zqB&&OAA7@&IqBYXllAnXe)&kSn^_e!R-CUcYeV2%KZDh2%soZ2u#^^Ki@D$sX_h7Y
z}oeuOpr
zR*3=WeSU7VvNJ6b<%X9A
z4!z+sp9Tw2656D+IqzLB1_p&Vn&%TuId77Dn}pFI6N{&1GB#
z{uiI&{ebTbHVghOg?}kOZ`<1Q<;VBKqv%piT7Ms2UDLhAvql>
z0Cs<2*G3a*y7?B`Yw-nm-ITW`U#4M^Yq-4i`XBNI;pFYLdg-N8DW4nVST#uF<^;Rd
zZ|##;L`(8MOJ^aL_}k524J}BzGBxzuaxk~`t#wF_d>9aZHEGV!Znn4d0g|U&@Fi8Nd8T+O{zUrr6Cu8p9Zm{Dw#4R
z&@cJDlD?kFb#rsfW5xxSga3IAgKcys-Ma_<=UtN@K@6v?Q_~BL!8z_Fcch?uB43>s
z<@)RNu3%io?r3F$BzrjW_khKZ2dy@?3HqpYo^38F%f)fcO^7JN{6IVF1_V!6lymOS
z(~fmY&;`X6gcdaeYJ}^;sct7^qracp@n80&8CO~`*2D{jqGwp3f-40aIOCnxlipl=
zJ1uGVcDslS9C5*iqN;^DQQo(9jk5+Se`%3RmJdC5lZ0tc>ktSjXKT0=$Zdb?WXK)|P|FC228V$$wNhsTSQLOu519I+Tzl0v@HT6~Tm;xaP3urUJisJPDxmRA
ztdV<($*g4!Wq|zmfhQCT-~{eO)vx%BRiia7BsUP@bvbm+k(VhPw0yT_Cj(h?l(T`r
zg2^lNMN44eb49rjv@pnd|Ad~L$};G5o{dZHtoqhG7xQiHD@A%$Qi)g20KtnIx^(++
z(7U^OcjD4KnlDygrs{Z!PT~z7T$Hh#EgS(o!LTeb$*y(Wat==0UKzttXDcu9x1^bg
z*E4mn({2Ky#R;^%WjQ`s)R8mBz#VnR`(Y=ve&r(wYGDh}cyGHa_UM)+ccX#TxZxY#
z`u2ixSG_~7^JZ+1jD|$H^%UB;ZTNhUYQ08k>8{cd{dv=#BD0%-7Z@Un29G-1rd&br
z_Lob4DTT-I(ny;8Qj57k;9do7OErt@K=NBamp2Q1P^EH)XhoR_YFSwmJ95$B0JPKS
z7^%rTFs}d5ML$d@H|Tw_*4{ez6XsVLT*nNN2tLm=+v0IiiL4%eQH&~;Y>2s0uY&qH
zQyiYjO-hiF#+h