From 52f2f40774f298b3a977ec7bc8dd5a86f09ed15a Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Sat, 3 Jan 2026 01:21:40 +0100
Subject: [PATCH] finalized integration testing of enhanced ai service
---
.../services/serviceAi/subAiCallLooping.py | 48 +++++++---
.../services/serviceAi/subStructureFilling.py | 6 +-
.../serviceAi/subStructureGeneration.py | 6 +-
.../mainServiceGeneration.py | 10 +++
.../serviceGeneration/renderers/registry.py | 30 +++++++
.../renderers/rendererBaseTemplate.py | 15 +++-
.../renderers/rendererCsv.py | 7 +-
.../renderers/rendererDocx.py | 7 +-
.../renderers/rendererHtml.py | 7 +-
.../renderers/rendererImage.py | 7 +-
.../renderers/rendererJson.py | 7 +-
.../renderers/rendererMarkdown.py | 7 +-
.../renderers/rendererPdf.py | 7 +-
.../renderers/rendererPptx.py | 15 ++++
.../renderers/rendererText.py | 17 +++-
.../renderers/rendererXlsx.py | 25 +++++-
.../processing/adaptive/contentValidator.py | 6 +-
.../processing/adaptive/progressTracker.py | 3 +-
.../workflows/processing/core/taskPlanner.py | 2 +-
.../workflows/processing/modes/modeDynamic.py | 72 +++++++++++----
.../processing/shared/placeholderFactory.py | 46 ++++++++++
.../shared/promptGenerationTaskplan.py | 20 +++--
modules/workflows/workflowManager.py | 89 ++++++++++++++-----
23 files changed, 386 insertions(+), 73 deletions(-)
diff --git a/modules/services/serviceAi/subAiCallLooping.py b/modules/services/serviceAi/subAiCallLooping.py
index 3d4a0866..2b71520b 100644
--- a/modules/services/serviceAi/subAiCallLooping.py
+++ b/modules/services/serviceAi/subAiCallLooping.py
@@ -117,18 +117,24 @@ class AiCallLooper:
if not lastRawResponse:
logger.warning(f"Iteration {iteration}: No previous response available for continuation!")
- # Filter promptArgs to only include parameters that buildGenerationPrompt accepts
- # buildGenerationPrompt accepts: outputFormat, userPrompt, title, extracted_content, continuationContext, services
- filteredPromptArgs = {
- k: v for k, v in promptArgs.items()
- if k in ['outputFormat', 'userPrompt', 'title', 'extracted_content', 'services']
- }
- # Always include services if available
- if not filteredPromptArgs.get('services') and hasattr(self, 'services'):
- filteredPromptArgs['services'] = self.services
-
- # Rebuild prompt with continuation context using the provided prompt builder
- iterationPrompt = await promptBuilder(**filteredPromptArgs, continuationContext=continuationContext)
+ # For section_content, pass all promptArgs (it uses buildSectionPromptWithContinuation which needs all args)
+ # For other use cases (chapter_structure, code_structure), filter to only accepted parameters
+ if useCaseId == "section_content":
+                        # Pass all promptArgs plus continuationContext for section_content (assumes promptArgs never already contains a 'continuationContext' key, otherwise this call raises TypeError for a duplicate keyword)
+ iterationPrompt = await promptBuilder(**promptArgs, continuationContext=continuationContext)
+ else:
+ # Filter promptArgs to only include parameters that buildGenerationPrompt accepts
+ # buildGenerationPrompt accepts: outputFormat, userPrompt, title, extracted_content, continuationContext, services
+ filteredPromptArgs = {
+ k: v for k, v in promptArgs.items()
+ if k in ['outputFormat', 'userPrompt', 'title', 'extracted_content', 'services']
+ }
+ # Always include services if available
+ if not filteredPromptArgs.get('services') and hasattr(self, 'services'):
+ filteredPromptArgs['services'] = self.services
+
+ # Rebuild prompt with continuation context using the provided prompt builder
+ iterationPrompt = await promptBuilder(**filteredPromptArgs, continuationContext=continuationContext)
else:
# First iteration - use original prompt
iterationPrompt = prompt
@@ -241,8 +247,22 @@ class AiCallLooper:
# Handle use cases that return JSON directly (no section extraction needed)
directReturnUseCases = ["section_content", "chapter_structure", "code_structure", "code_content", "image_batch"]
if useCaseId in directReturnUseCases:
- # For chapter_structure and code_structure, check completeness and support looping
- if useCaseId in ["chapter_structure", "code_structure"] and parsedJsonForUseCase:
+ # For chapter_structure, code_structure, and section_content, check completeness and support looping
+ loopingUseCases = ["chapter_structure", "code_structure", "section_content"]
+ if useCaseId in loopingUseCases:
+ # If parsing failed (e.g., invalid JSON with comments or truncated JSON), continue looping to get valid JSON
+ if not parsedJsonForUseCase:
+ logger.info(f"Iteration {iteration}: Use case '{useCaseId}' - JSON parsing failed (likely incomplete/truncated), continuing iteration to complete")
+ # Accumulate response for merging in next iteration
+ accumulatedDirectJson.append(result)
+
+ # Continue to next iteration - continuation prompt builder will handle the rest
+ if iterationOperationId:
+ self.services.chat.progressLogUpdate(iterationOperationId, 0.7, "JSON incomplete, requesting continuation")
+ self.services.chat.progressLogFinish(iterationOperationId, True)
+ continue
+
+ # Check completeness if we have parsed JSON
isComplete = JsonResponseHandler.isJsonComplete(parsedJsonForUseCase)
if not isComplete:
diff --git a/modules/services/serviceAi/subStructureFilling.py b/modules/services/serviceAi/subStructureFilling.py
index b1f6d6b6..3d687398 100644
--- a/modules/services/serviceAi/subStructureFilling.py
+++ b/modules/services/serviceAi/subStructureFilling.py
@@ -2192,7 +2192,8 @@ Return a JSON object with this structure:
Output requirements:
- "content" must be an object (never a string)
-- Return only valid JSON, no explanatory text
+- Return only valid JSON - no text before, no text after, no comments, no explanations, no markdown code fences
+- Start with {{ and end with }} - return ONLY the JSON object itself
- No invented data: Return empty structures if ContentParts have no data
## USER REQUEST
@@ -2235,7 +2236,8 @@ Return a JSON object with this structure:
Output requirements:
- "content" must be an object (never a string)
-- Return only valid JSON, no explanatory text
+- Return only valid JSON - no text before, no text after, no comments, no explanations, no markdown code fences
+- Start with {{ and end with }} - return ONLY the JSON object itself
- Generate meaningful content based on the Generation Hint
## USER REQUEST
diff --git a/modules/services/serviceAi/subStructureGeneration.py b/modules/services/serviceAi/subStructureGeneration.py
index e1651f50..c6774fc3 100644
--- a/modules/services/serviceAi/subStructureGeneration.py
+++ b/modules/services/serviceAi/subStructureGeneration.py
@@ -365,7 +365,9 @@ Then chapters that generate those generic content types MUST assign the relevant
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Each chapter needs: id, level (1, 2, 3, etc.), title
- contentParts: {{"partId": {{"instruction": "..."}} or {{"caption": "..."}} or both}} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
+- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Description of what content to generate for this chapter
+ The generationHint MUST contain ALL relevant details from the USER REQUEST that apply to this specific chapter. Include all formatting rules, data requirements, constraints, column specifications, validation rules, and any other specifications mentioned in the user request that are relevant for generating content for this chapter. Do NOT use generic descriptions - include specific details from the user request.
- The number of chapters depends on the user request - create only what is requested
## DOCUMENT OUTPUT FORMAT
@@ -420,10 +422,10 @@ EXAMPLE STRUCTURE (for reference only - adapt to user request):
"title": "Chapter Title",
"contentParts": {{
"extracted_part_id": {{
- "instruction": "Use extracted content..."
+ "instruction": "Use extracted content with ALL relevant details from user request"
}}
}},
- "generationHint": "Description of chapter content",
+ "generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}}
]
diff --git a/modules/services/serviceGeneration/mainServiceGeneration.py b/modules/services/serviceGeneration/mainServiceGeneration.py
index 45ef37e1..a49b78c7 100644
--- a/modules/services/serviceGeneration/mainServiceGeneration.py
+++ b/modules/services/serviceGeneration/mainServiceGeneration.py
@@ -413,6 +413,16 @@ class GenerationService:
logger.warning(f"Unsupported format '{docFormat}' for document {doc.get('id', docIndex)}, skipping")
continue
+ # Check output style classification (code/document/image/etc.) from renderer
+ from modules.services.serviceGeneration.renderers.registry import getOutputStyle
+ outputStyle = getOutputStyle(docFormat)
+ if outputStyle:
+ logger.debug(f"Document {doc.get('id', docIndex)} format '{docFormat}' classified as '{outputStyle}' style")
+ # Store style in document metadata for potential use in processing paths
+ if "metadata" not in doc:
+ doc["metadata"] = {}
+ doc["metadata"]["outputStyle"] = outputStyle
+
# Create JSON structure with single document (preserving metadata)
singleDocContent = {
"metadata": {**metadata, "language": docLanguage}, # Add per-document language to metadata
diff --git a/modules/services/serviceGeneration/renderers/registry.py b/modules/services/serviceGeneration/renderers/registry.py
index 04ac520f..fdaba913 100644
--- a/modules/services/serviceGeneration/renderers/registry.py
+++ b/modules/services/serviceGeneration/renderers/registry.py
@@ -139,6 +139,32 @@ class RendererRegistry:
}
return info
+
+    def getOutputStyle(self, outputFormat: str) -> Optional[str]:
+        """
+        Get the output style classification for a given format.
+        Returns: 'code', 'document', 'image', or other (e.g., 'video' for future use)
+        """
+        if not self._discovered:
+            self.discoverRenderers()
+
+        # Normalize format name
+        formatName = outputFormat.lower().strip()
+
+        # Check for aliases first
+        if formatName in self._format_mappings:
+            formatName = self._format_mappings[formatName]
+
+        # Look up renderer; an unknown format yields None, which triggers the AttributeError arm below
+        rendererClass = self._renderers.get(formatName)
+        try:
+            return rendererClass.getOutputStyle(formatName)
+        except (AttributeError, TypeError):
+            logger.warning(f"No renderer found for format: {outputFormat}, cannot determine output style")
+            return None
+        except Exception as e:
+            logger.warning(f"Error getting output style for {outputFormat}: {str(e)}")
+            return None
# Global registry instance
_registry = RendererRegistry()
@@ -154,3 +180,7 @@ def getSupportedFormats() -> List[str]:
def getRendererInfo() -> Dict[str, Dict[str, str]]:
"""Get information about all registered renderers."""
return _registry.getRendererInfo()
+
+def getOutputStyle(outputFormat: str) -> Optional[str]:
+    """Get the output style ('code' / 'document' / 'image') for a format via the global registry, or None if unknown."""
+    return _registry.getOutputStyle(outputFormat)
diff --git a/modules/services/serviceGeneration/renderers/rendererBaseTemplate.py b/modules/services/serviceGeneration/renderers/rendererBaseTemplate.py
index efe53eaa..0c72bd24 100644
--- a/modules/services/serviceGeneration/renderers/rendererBaseTemplate.py
+++ b/modules/services/serviceGeneration/renderers/rendererBaseTemplate.py
@@ -5,7 +5,7 @@ Base renderer class for all format renderers.
"""
from abc import ABC, abstractmethod
-from typing import Dict, Any, List, Tuple
+from typing import Dict, Any, List, Tuple, Optional
from modules.datamodels.datamodelJson import supportedSectionTypes
from modules.datamodels.datamodelDocument import RenderedDocument
import json
@@ -50,6 +50,19 @@ class BaseRenderer(ABC):
"""
return 0
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """
+        Return the output style classification for this renderer.
+        Returns: 'code', 'document', 'image', or other (e.g., 'video' for future use).
+        Subclasses override this to declare their own style; this base default is 'document'.
+
+        Args:
+            formatName: Optional format name (e.g., 'txt', 'js', 'csv') - useful for renderers
+                that handle multiple formats with different styles (e.g., RendererText)
+        """
+        return 'document'  # Default to document style
+
@abstractmethod
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""
diff --git a/modules/services/serviceGeneration/renderers/rendererCsv.py b/modules/services/serviceGeneration/renderers/rendererCsv.py
index 15be4d96..eb00a610 100644
--- a/modules/services/serviceGeneration/renderers/rendererCsv.py
+++ b/modules/services/serviceGeneration/renderers/rendererCsv.py
@@ -6,7 +6,7 @@ CSV renderer for report generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
class RendererCsv(BaseRenderer):
"""Renders content to CSV format with format-specific extraction."""
@@ -26,6 +26,11 @@ class RendererCsv(BaseRenderer):
"""Return priority for CSV renderer."""
return 70
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'code': CSV output must follow strict row/column syntax (header line, then data rows)."""
+        return 'code'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to CSV format."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererDocx.py b/modules/services/serviceGeneration/renderers/rendererDocx.py
index b0f62394..6a714c3f 100644
--- a/modules/services/serviceGeneration/renderers/rendererDocx.py
+++ b/modules/services/serviceGeneration/renderers/rendererDocx.py
@@ -6,7 +6,7 @@ DOCX renderer for report generation using python-docx.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
import io
import base64
import re
@@ -39,6 +39,11 @@ class RendererDocx(BaseRenderer):
"""Return priority for DOCX renderer."""
return 115
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'document': Word output is a formatted, free-form document."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to DOCX format using AI-analyzed styling."""
self.services.utils.debugLogToFile(f"DOCX RENDER CALLED: title={title}, user_prompt={userPrompt[:50] if userPrompt else 'None'}...", "DOCX_RENDERER")
diff --git a/modules/services/serviceGeneration/renderers/rendererHtml.py b/modules/services/serviceGeneration/renderers/rendererHtml.py
index 1797af6d..34017e67 100644
--- a/modules/services/serviceGeneration/renderers/rendererHtml.py
+++ b/modules/services/serviceGeneration/renderers/rendererHtml.py
@@ -6,7 +6,7 @@ HTML renderer for report generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
class RendererHtml(BaseRenderer):
"""Renders content to HTML format with format-specific extraction."""
@@ -26,6 +26,11 @@ class RendererHtml(BaseRenderer):
"""Return priority for HTML renderer."""
return 100
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'document': HTML pages are rendered as formatted documents."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""
Render HTML document with images as separate files.
diff --git a/modules/services/serviceGeneration/renderers/rendererImage.py b/modules/services/serviceGeneration/renderers/rendererImage.py
index 197560d1..02d991fe 100644
--- a/modules/services/serviceGeneration/renderers/rendererImage.py
+++ b/modules/services/serviceGeneration/renderers/rendererImage.py
@@ -6,7 +6,7 @@ Image renderer for report generation using AI image generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
import logging
import base64
@@ -30,6 +30,11 @@ class RendererImage(BaseRenderer):
"""Return priority for image renderer."""
return 90
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'image': this renderer produces visual media."""
+        return 'image'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to image format using AI image generation."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererJson.py b/modules/services/serviceGeneration/renderers/rendererJson.py
index 91e8342d..10aa63d5 100644
--- a/modules/services/serviceGeneration/renderers/rendererJson.py
+++ b/modules/services/serviceGeneration/renderers/rendererJson.py
@@ -6,7 +6,7 @@ JSON renderer for report generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
import json
class RendererJson(BaseRenderer):
@@ -27,6 +27,11 @@ class RendererJson(BaseRenderer):
"""Return priority for JSON renderer."""
return 80
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'code': JSON output must follow strict structured-data syntax."""
+        return 'code'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to JSON format."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererMarkdown.py b/modules/services/serviceGeneration/renderers/rendererMarkdown.py
index 2bdbf114..e76046b0 100644
--- a/modules/services/serviceGeneration/renderers/rendererMarkdown.py
+++ b/modules/services/serviceGeneration/renderers/rendererMarkdown.py
@@ -6,7 +6,7 @@ Markdown renderer for report generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
class RendererMarkdown(BaseRenderer):
"""Renders content to Markdown format with format-specific extraction."""
@@ -26,6 +26,11 @@ class RendererMarkdown(BaseRenderer):
"""Return priority for markdown renderer."""
return 95
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'document': Markdown output is a formatted text document."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to Markdown format."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererPdf.py b/modules/services/serviceGeneration/renderers/rendererPdf.py
index 32aca32c..50ec9222 100644
--- a/modules/services/serviceGeneration/renderers/rendererPdf.py
+++ b/modules/services/serviceGeneration/renderers/rendererPdf.py
@@ -6,7 +6,7 @@ PDF renderer for report generation using reportlab.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
import io
import base64
@@ -39,6 +39,11 @@ class RendererPdf(BaseRenderer):
"""Return priority for PDF renderer."""
return 120
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'document': PDF output is a formatted document."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to PDF format using AI-analyzed styling."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererPptx.py b/modules/services/serviceGeneration/renderers/rendererPptx.py
index ff3d005d..bb43d8be 100644
--- a/modules/services/serviceGeneration/renderers/rendererPptx.py
+++ b/modules/services/serviceGeneration/renderers/rendererPptx.py
@@ -26,6 +26,21 @@ class RendererPptx(BaseRenderer):
"""Get list of supported output formats."""
return ["pptx", "ppt"]
+    @classmethod
+    def getFormatAliases(cls) -> List[str]:
+        """Return format aliases (none for PowerPoint)."""
+        return []
+
+    @classmethod
+    def getPriority(cls) -> int:
+        """Return priority for PowerPoint renderer."""
+        return 105
+
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:  # NOTE(review): this patch adds no typing-import change to rendererPptx.py (diffstat shows additions only) - confirm 'Optional' is already imported here, otherwise class creation raises NameError
+        """Return output style classification: PowerPoint presentations are formatted documents."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""
Render content as PowerPoint presentation from JSON data.
diff --git a/modules/services/serviceGeneration/renderers/rendererText.py b/modules/services/serviceGeneration/renderers/rendererText.py
index 52035014..fd15e50d 100644
--- a/modules/services/serviceGeneration/renderers/rendererText.py
+++ b/modules/services/serviceGeneration/renderers/rendererText.py
@@ -6,7 +6,7 @@ Text renderer for report generation.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
class RendererText(BaseRenderer):
"""Renders content to plain text format with format-specific extraction."""
@@ -48,6 +48,21 @@ class RendererText(BaseRenderer):
"""Return priority for text renderer."""
return 90
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """
+        Return output style classification based on format.
+        For txt/text/plain: 'document' (unstructured text)
+        For all other formats: 'code' (structured formats with rules/syntax)
+
+        Note: formatName parameter is provided by registry when calling this method.
+        """
+        # Plain text formats are document style
+        if formatName and formatName.lower() in ['txt', 'text', 'plain']:
+            return 'document'
+        # All other formats handled by RendererText are code style
+        return 'code'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to plain text format."""
try:
diff --git a/modules/services/serviceGeneration/renderers/rendererXlsx.py b/modules/services/serviceGeneration/renderers/rendererXlsx.py
index 404abf31..14f8a71a 100644
--- a/modules/services/serviceGeneration/renderers/rendererXlsx.py
+++ b/modules/services/serviceGeneration/renderers/rendererXlsx.py
@@ -6,7 +6,7 @@ Excel renderer for report generation using openpyxl.
from .rendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
import io
import base64
from datetime import datetime, UTC, date
@@ -43,6 +43,11 @@ class RendererXlsx(BaseRenderer):
"""Return priority for Excel renderer."""
return 110
+    @classmethod
+    def getOutputStyle(cls, formatName: Optional[str] = None) -> str:
+        """Return 'document': Excel workbooks are treated as formatted documents (not raw 'code' syntax)."""
+        return 'document'
+
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]:
"""Render extracted JSON content to Excel format using AI-analyzed styling."""
try:
@@ -798,6 +803,7 @@ class RendererXlsx(BaseRenderer):
# Add rows - handle both array format and cells object format
cell_style = styles.get("table_cell", {})
+ header_count = len(headers)
for row_idx, row_data in enumerate(rows, 4):
# Handle different row formats
if isinstance(row_data, list):
@@ -810,6 +816,14 @@ class RendererXlsx(BaseRenderer):
# Unknown format, skip
continue
+ # Validate row column count matches headers - pad or truncate if needed
+ if len(cell_values) < header_count:
+ # Pad with empty strings if row has fewer columns
+ cell_values.extend([""] * (header_count - len(cell_values)))
+ elif len(cell_values) > header_count:
+ # Truncate if row has more columns than headers
+ cell_values = cell_values[:header_count]
+
for col_idx, cell_value in enumerate(cell_values, 1):
# Extract value if it's a dict with "value" key
if isinstance(cell_value, dict):
@@ -1178,6 +1192,7 @@ class RendererXlsx(BaseRenderer):
# Add rows with formatting
cell_style = styles.get("table_cell", {})
+ header_count = len(headers)
for row_data in rows:
# Handle different row formats
if isinstance(row_data, list):
@@ -1187,6 +1202,14 @@ class RendererXlsx(BaseRenderer):
else:
continue
+ # Validate row column count matches headers - pad or truncate if needed
+ if len(cell_values) < header_count:
+ # Pad with empty strings if row has fewer columns
+ cell_values.extend([""] * (header_count - len(cell_values)))
+ elif len(cell_values) > header_count:
+ # Truncate if row has more columns than headers
+ cell_values = cell_values[:header_count]
+
for col, cell_value in enumerate(cell_values, 1):
sanitized_value = self._sanitizeCellValue(cell_value)
cell = sheet.cell(row=startRow, column=col, value=sanitized_value)
diff --git a/modules/workflows/processing/adaptive/contentValidator.py b/modules/workflows/processing/adaptive/contentValidator.py
index fe0ee5bd..32f9c528 100644
--- a/modules/workflows/processing/adaptive/contentValidator.py
+++ b/modules/workflows/processing/adaptive/contentValidator.py
@@ -540,7 +540,7 @@ class ContentValidator:
if not hasattr(self, 'services') or not self.services or not hasattr(self.services, 'ai'):
return self._createFailedValidationResult("AI service not available")
- # Use taskStep.objective if available, otherwise fall back to intent.primaryGoal
+ # Use taskStep.objective if available, otherwise fall back to workflow intent
taskObjective = None
if taskStep and hasattr(taskStep, 'objective'):
taskObjective = taskStep.objective
@@ -567,7 +567,9 @@ class ContentValidator:
expectedFormats = intent.get('expectedFormats', [])
# Determine objective text and label
- objectiveText = taskObjective if taskObjective else intent.get('primaryGoal', 'Unknown')
+ workflowIntent = getattr(self.services.workflow, '_workflowIntent', {}) if hasattr(self.services, 'workflow') and self.services.workflow else {}
+ intentText = workflowIntent.get('intent', 'Unknown')
+ objectiveText = taskObjective if taskObjective else intentText
objectiveLabel = "TASK OBJECTIVE" if taskObjective else "USER REQUEST"
# Build prompt base WITHOUT document summaries first
diff --git a/modules/workflows/processing/adaptive/progressTracker.py b/modules/workflows/processing/adaptive/progressTracker.py
index 2b6cf8b3..80c570ed 100644
--- a/modules/workflows/processing/adaptive/progressTracker.py
+++ b/modules/workflows/processing/adaptive/progressTracker.py
@@ -28,7 +28,8 @@ class ProgressTracker:
improvementSuggestions = validation.get('improvementSuggestions', [])
# Get task objective from taskIntent (task-level, not workflow-level)
- taskObjective = taskIntent.get('taskObjective', taskIntent.get('primaryGoal', 'Unknown'))
+ # Fallback to 'Unknown' if task objective not available
+ taskObjective = taskIntent.get('taskObjective', 'Unknown')
# If validation is not schema compliant, treat as indeterminate (do not count as failure)
if not schemaCompliant or overallSuccess is None or qualityScore is None:
diff --git a/modules/workflows/processing/core/taskPlanner.py b/modules/workflows/processing/core/taskPlanner.py
index 20abccde..0fac427c 100644
--- a/modules/workflows/processing/core/taskPlanner.py
+++ b/modules/workflows/processing/core/taskPlanner.py
@@ -64,7 +64,7 @@ class TaskPlanner:
# Use workflowIntent from workflow object (set in workflowManager from userintention analysis)
workflowIntent = getattr(workflow, '_workflowIntent', None)
if workflowIntent and isinstance(workflowIntent, dict):
- cleanedObjective = workflowIntent.get('primaryGoal', actualUserPrompt)
+ cleanedObjective = workflowIntent.get('intent', actualUserPrompt)
else:
# Fallback: use user prompt directly if workflowIntent not available
cleanedObjective = actualUserPrompt
diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py
index 92e04e96..f7754eab 100644
--- a/modules/workflows/processing/modes/modeDynamic.py
+++ b/modules/workflows/processing/modes/modeDynamic.py
@@ -149,21 +149,63 @@ class DynamicMode(BaseMode):
})
# Content validation (against original cleaned user prompt / workflow intent)
- if getattr(self, 'workflowIntent', None) and result.documents:
- # Pass ALL documents to validator - validator decides what to validate (generic approach)
- # Pass taskStep so validator can use task.objective and format fields
- # Pass action name so validator knows which action created the documents
- # Pass action parameters so validator can verify parameter-specific requirements
- # Pass action history so validator can validate process-oriented criteria in multi-step workflows
- actionName = selection.get('action', 'unknown')
- actionParameters = selection.get('parameters', {})
- actionHistory = getattr(context, 'executedActions', None) if hasattr(context, 'executedActions') else None
- validationResult = await self.contentValidator.validateContent(result.documents, self.workflowIntent, taskStep, actionName, actionParameters, actionHistory, context)
- observation.contentValidation = validationResult
- quality_score = validationResult.get('qualityScore', 0.0)
- if quality_score is None:
- quality_score = 0.0
- logger.info(f"Content validation: {validationResult['overallSuccess']} (quality: {quality_score:.2f})")
+ if getattr(self, 'workflowIntent', None):
+ # Collect ALL documents from current round, not just from last action
+ # Start with documents from current action (ActionDocument objects with metadata)
+ allRoundDocuments = list(result.documents) if result and result.documents else []
+
+ # Also collect ChatDocument references from all messages in current round
+ # These provide document existence info even if we don't have full metadata
+ if workflow and hasattr(workflow, 'messages') and workflow.messages:
+ currentRound = getattr(workflow, 'currentRound', 0)
+ currentTask = getattr(workflow, 'currentTask', 0)
+ # Collect documents from all messages in current round
+ for message in workflow.messages:
+ if hasattr(message, 'documents') and message.documents:
+ for chatDoc in message.documents:
+ # Include documents from current round and current task
+ docRound = getattr(chatDoc, 'roundNumber', None)
+ docTask = getattr(chatDoc, 'taskNumber', None)
+ if docRound == currentRound and (docTask is None or docTask == currentTask):
+ # Avoid duplicates - check if document already in list by fileId
+ chatDocFileId = getattr(chatDoc, 'fileId', None)
+ if chatDocFileId:
+ # Check if we already have this document (by fileId for ChatDocument, by documentName for ActionDocument)
+ isDuplicate = False
+ for existingDoc in allRoundDocuments:
+ existingFileId = getattr(existingDoc, 'fileId', None)
+ existingDocName = getattr(existingDoc, 'documentName', None)
+ # Match by fileId or by documentName matching fileName
+ if (existingFileId == chatDocFileId) or \
+ (existingDocName and hasattr(chatDoc, 'fileName') and existingDocName == chatDoc.fileName):
+ isDuplicate = True
+ break
+ if not isDuplicate:
+ allRoundDocuments.append(chatDoc)
+
+ # Only validate if we have documents to validate
+ if allRoundDocuments:
+ # Pass ALL documents from current round to validator
+ # Pass taskStep so validator can use task.objective and format fields
+ # Pass action name so validator knows which action created the documents
+ # Pass action parameters so validator can verify parameter-specific requirements
+ # Pass action history so validator can validate process-oriented criteria in multi-step workflows
+ actionName = selection.get('action', 'unknown')
+ actionParameters = selection.get('parameters', {})
+ actionHistory = getattr(context, 'executedActions', None) if hasattr(context, 'executedActions') else None
+ validationResult = await self.contentValidator.validateContent(allRoundDocuments, self.workflowIntent, taskStep, actionName, actionParameters, actionHistory, context)
+ else:
+ # No documents to validate
+ validationResult = None
+
+ if validationResult:
+ observation.contentValidation = validationResult
+ quality_score = validationResult.get('qualityScore', 0.0)
+ if quality_score is None:
+ quality_score = 0.0
+ logger.info(f"Content validation: {validationResult.get('overallSuccess', False)} (quality: {quality_score:.2f})")
+ else:
+ logger.info("Content validation skipped: no documents to validate")
# NEW: Record validation result for adaptive learning
actionValue = selection.get('action', 'unknown')
diff --git a/modules/workflows/processing/shared/placeholderFactory.py b/modules/workflows/processing/shared/placeholderFactory.py
index f94d08d4..0be4e029 100644
--- a/modules/workflows/processing/shared/placeholderFactory.py
+++ b/modules/workflows/processing/shared/placeholderFactory.py
@@ -68,6 +68,52 @@ def extractUserPrompt(context: Any) -> str:
return context.taskStep.objective
return 'No request specified'
+def extractNormalizedRequest(services: Any) -> str:
+ """Extract normalized user request from services. Maps to {{KEY:NORMALIZED_REQUEST}}.
+ Returns the full normalized request from user input analysis (preserves all constraints and details).
+ CRITICAL: Must return the actual normalizedRequest from analysis, NOT intent.
+ """
+ try:
+ # Get normalized request from currentUserPromptNormalized (stores the normalizedRequest from analysis)
+ if services and getattr(services, 'currentUserPromptNormalized', None):
+ normalized = services.currentUserPromptNormalized
+ # Validate that it's not the intent (which is shorter and less detailed)
+        # Intent is typically a concise objective; the normalized request should be longer and more detailed
+ workflowIntent = getattr(services.workflow, '_workflowIntent', {}) if hasattr(services, 'workflow') and services.workflow else {}
+ intent = workflowIntent.get('intent', '')
+
+ # If normalized matches intent exactly, it's wrong - log warning
+ if intent and normalized == intent:
+ logger.warning(f"extractNormalizedRequest: normalized request matches intent - this is incorrect! normalized={normalized[:100]}...")
+ # Try to get from workflow intent or return error message
+ return f"ERROR: Normalized request not properly stored. Expected detailed request, got intent: {intent}"
+
+ return normalized
+
+ return 'No normalized request specified'
+ except Exception as e:
+ logger.error(f"Error extracting normalized request: {str(e)}")
+ return 'No normalized request specified'
+
+def extractUserIntent(services: Any) -> str:
+ """Extract user intent from services. Maps to {{KEY:USER_INTENT}}.
+ Returns the concise intent from user input analysis, or falls back to normalized request.
+ """
+ try:
+ # Get intent from currentUserPrompt (stores the intent from analysis)
+ if services and getattr(services, 'currentUserPrompt', None):
+ intent = services.currentUserPrompt
+            # If the intent is identical to the normalized request, that is acceptable - use it
+ return intent
+
+ # Fallback to normalized request if intent not available
+ if services and getattr(services, 'currentUserPromptNormalized', None):
+ return services.currentUserPromptNormalized
+
+ return 'No intent specified'
+ except Exception:
+ return 'No intent specified'
+
def extractWorkflowHistory(service: Any) -> str:
"""Extract workflow history. Maps to {{KEY:WORKFLOW_HISTORY}}
Reverse-chronological, enriched with message summaries and document labels.
diff --git a/modules/workflows/processing/shared/promptGenerationTaskplan.py b/modules/workflows/processing/shared/promptGenerationTaskplan.py
index 1d4d999a..11a54ca1 100644
--- a/modules/workflows/processing/shared/promptGenerationTaskplan.py
+++ b/modules/workflows/processing/shared/promptGenerationTaskplan.py
@@ -12,6 +12,8 @@ from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractAvailableDocumentsSummary,
extractWorkflowHistory,
+ extractUserIntent,
+ extractNormalizedRequest,
)
logger = logging.getLogger(__name__)
@@ -41,13 +43,13 @@ def generateTaskPlanningPrompt(services, context: Any) -> PromptBundle:
- Data Type: {workflowIntent.get('dataType', 'unknown')}
- Expected Formats: {workflowIntent.get('expectedFormats', [])}
- Quality Requirements: {workflowIntent.get('qualityRequirements', {})}
-- Primary Goal: {workflowIntent.get('primaryGoal', '')}
Note: Tasks can override these if task-specific needs differ (e.g., workflow wants PDF, but task needs CSV for intermediate step).
"""
placeholders: List[PromptPlaceholder] = [
- PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
+ PromptPlaceholder(label="NORMALIZED_REQUEST", content=extractNormalizedRequest(services), summaryAllowed=False),
+ PromptPlaceholder(label="USER_INTENT", content=extractUserIntent(services), summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services), summaryAllowed=True),
PromptPlaceholder(label="USER_LANGUAGE", content=userLanguage, summaryAllowed=False),
@@ -62,9 +64,17 @@ Break down user requests into logical, executable task steps.
## 📋 Context
-### User Request
-The following is the user's normalized request:
-{{KEY:USER_PROMPT}}
+### Normalized User Request
+The following is the user's full normalized request (preserves all constraints and details):
+```
+{{KEY:NORMALIZED_REQUEST}}
+```
+
+### User Intent
+The following is the user's intent (concise objective):
+```
+{{KEY:USER_INTENT}}
+```
### Workflow Intent
{{KEY:WORKFLOW_INTENT}}
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index 01db9438..9806060a 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -212,7 +212,7 @@ class WorkflowManager:
# Extract intent analysis fields and store as workflowIntent
workflowIntent = {
- 'primaryGoal': analysisResult.get('primaryGoal'),
+ 'intent': intentText, # Use intent instead of primaryGoal
'dataType': analysisResult.get('dataType', 'unknown'),
'expectedFormats': analysisResult.get('expectedFormats', []),
'qualityRequirements': analysisResult.get('qualityRequirements', {}),
@@ -229,8 +229,16 @@ class WorkflowManager:
self.services.workflow._workflowIntent = workflowIntent
# Store normalized request and intent
+ # CRITICAL: normalizedRequest MUST be used if available, do NOT fall back to intent
self.services.currentUserPrompt = intentText or userInput.prompt
- self.services.currentUserPromptNormalized = normalizedRequest or intentText or userInput.prompt
+ if normalizedRequest and normalizedRequest.strip():
+ # Use normalizedRequest if available and not empty
+ self.services.currentUserPromptNormalized = normalizedRequest
+ logger.info(f"Stored normalized request (length: {len(normalizedRequest)}, preview: {normalizedRequest[:100]}...)")
+ else:
+ # Fallback only if normalizedRequest is None or empty
+ logger.warning(f"normalizedRequest is None or empty, falling back to intentText. normalizedRequest={normalizedRequest}, intentText={intentText[:100] if intentText else None}...")
+ self.services.currentUserPromptNormalized = intentText or userInput.prompt
if contextItems is not None:
self.services.currentUserContextItems = contextItems
@@ -289,7 +297,6 @@ class WorkflowManager:
- complexity: "simple" | "moderate" | "complex"
- needsWorkflowHistory: bool
- fastTrack: bool
- - primaryGoal: Hauptziel
- dataType: Datentyp
- expectedFormats: Erwartete Formate
- qualityRequirements: Qualitätsanforderungen
@@ -313,11 +320,10 @@ class WorkflowManager:
- "complex": Multi-task workflow, many documents, research needed, content generation required, multi-step planning (60-120s)
6. needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work)
7. fastTrack: Boolean indicating if Fast Track is possible (simple requests without documents and without workflow history)
-8. primaryGoal: The main objective the user wants to achieve
-9. dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown)
-10. expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., ["xlsx", "pdf"]). If format is unclear or not specified, use empty list []
-11. qualityRequirements: Quality requirements they have (accuracy, completeness) as {{accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}}
-12. successCriteria: Specific success criteria that define completion (array of strings)
+8. dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown)
+9. expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., ["xlsx", "pdf"]). If format is unclear or not specified, use empty list []
+10. qualityRequirements: Quality requirements they have (accuracy, completeness) as {{accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}}
+11. successCriteria: Specific success criteria that define completion (array of strings)
Rules:
- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained
@@ -345,7 +351,6 @@ Return ONLY JSON (no markdown) with this exact structure:
"complexity": "simple" | "moderate" | "complex",
"needsWorkflowHistory": true|false,
"fastTrack": true|false,
- "primaryGoal": "The main objective the user wants to achieve",
"dataType": "numbers|text|documents|analysis|code|unknown",
"expectedFormats": ["pdf", "docx", "xlsx", "txt", "json", "csv", "html", "md"],
"qualityRequirements": {{
@@ -395,7 +400,6 @@ The following is the user's original input message. Analyze intent, normalize th
"complexity": "moderate",
"needsWorkflowHistory": False,
"fastTrack": False,
- "primaryGoal": None,
"dataType": "unknown",
"expectedFormats": [],
"qualityRequirements": {
@@ -523,10 +527,14 @@ The following is the user's original input message. Analyze intent, normalize th
roundNum = workflow.currentRound
contextLabel = f"round{roundNum}_usercontext"
+ # Use normalized request if available (from combined analysis), otherwise use original prompt
+ # This ensures the first message uses the normalized request for security
+ normalizedRequest = getattr(self.services, 'currentUserPromptNormalized', None) or userInput.prompt
+
messageData = {
"workflowId": workflow.id,
"role": "user",
- "message": userInput.prompt,
+ "message": normalizedRequest, # Use normalized request instead of original prompt
"status": "first",
"sequenceNr": 1,
"publishedAt": self.services.utils.timestampGetUtc(),
@@ -602,12 +610,11 @@ The following is the user's original input message. Analyze intent, normalize th
"2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details.\n"
"3) intent: concise single-paragraph core request in the detected language for high-level routing.\n"
"4) contextItems: supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content.\n"
- "5) primaryGoal: The main objective the user wants to achieve.\n"
- "6) dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown).\n"
- "7) expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., [\"xlsx\", \"pdf\"]). If format is unclear or not specified, use empty list [].\n"
- "8) qualityRequirements: Quality requirements they have (accuracy, completeness) as {accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}.\n"
- "9) successCriteria: Specific success criteria that define completion (array of strings).\n"
- "10) needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history to be understood or completed (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work). Return true if the request is a continuation, retry, modification, or builds upon previous work.\n\n"
+ "5) dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown).\n"
+ "6) expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., [\"xlsx\", \"pdf\"]). If format is unclear or not specified, use empty list [].\n"
+ "7) qualityRequirements: Quality requirements they have (accuracy, completeness) as {accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}.\n"
+ "8) successCriteria: Specific success criteria that define completion (array of strings).\n"
+ "9) needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history to be understood or completed (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work). Return true if the request is a continuation, retry, modification, or builds upon previous work.\n\n"
"Rules:\n"
"- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained.\n"
"- If content exceeds that threshold, move bulky parts into contextItems; keep intent short and clear.\n"
@@ -625,7 +632,6 @@ The following is the user's original input message. Analyze intent, normalize th
" \"content\": \"Full extracted content block here\"\n"
" }\n"
" ],\n"
- " \"primaryGoal\": \"The main objective the user wants to achieve\",\n"
" \"dataType\": \"numbers|text|documents|analysis|code|unknown\",\n"
" \"expectedFormats\": [\"pdf\", \"docx\", \"xlsx\", \"txt\", \"json\", \"csv\", \"html\", \"md\"],\n"
" \"qualityRequirements\": {\n"
@@ -668,8 +674,9 @@ The following is the user's original input message. Analyze intent, normalize th
contextItems = parsed.get('contextItems') or []
# Extract intent analysis fields and store as workflowIntent
+ intentText = parsed.get('intent') or userInput.prompt
workflowIntent = {
- 'primaryGoal': parsed.get('primaryGoal'),
+ 'intent': intentText, # Use intent instead of primaryGoal
'dataType': parsed.get('dataType', 'unknown'),
'expectedFormats': parsed.get('expectedFormats', []),
'qualityRequirements': parsed.get('qualityRequirements', {}),
@@ -727,10 +734,22 @@ The following is the user's original input message. Analyze intent, normalize th
pass
self.services.currentUserPrompt = intentText or userInput.prompt
# Always set currentUserPromptNormalized - use normalizedRequest if available, otherwise fallback to currentUserPrompt
- normalizedValue = normalizedRequest or intentText or userInput.prompt
- self.services.currentUserPromptNormalized = normalizedValue
+ # CRITICAL: normalizedRequest MUST be used if available, do NOT fall back to intent
+ if normalizedRequest and normalizedRequest.strip():
+ # Use normalizedRequest if available and not empty
+ self.services.currentUserPromptNormalized = normalizedRequest
+ logger.debug(f"Stored normalized request from analysis (length: {len(normalizedRequest)})")
+ else:
+ # Fallback only if normalizedRequest is None or empty
+ logger.warning(f"normalizedRequest is None or empty in analysis, falling back to intentText. normalizedRequest={normalizedRequest}, intentText={intentText}")
+ self.services.currentUserPromptNormalized = intentText or userInput.prompt
if contextItems is not None:
self.services.currentUserContextItems = contextItems
+
+ # Update message with normalized request if analysis produced one
+ if normalizedRequest and normalizedRequest != userInput.prompt:
+ messageData["message"] = normalizedRequest
+ logger.debug(f"Updated first message with normalized request (length: {len(normalizedRequest)})")
# Create documents for context items
if contextItems and isinstance(contextItems, list):
@@ -784,6 +803,34 @@ The following is the user's original input message. Analyze intent, normalize th
# Finally, persist and bind the first message with combined documents (context + user)
self.services.chat.storeMessageWithDocuments(workflow, messageData, createdDocs)
+ # Create ChatMessage with success criteria (KPI) AFTER the first user message
+ # This ensures the KPI message appears after the user message in the UI
+ workflowIntent = getattr(workflow, '_workflowIntent', None)
+ if workflowIntent and isinstance(workflowIntent, dict):
+ successCriteria = workflowIntent.get('successCriteria', [])
+ if successCriteria and isinstance(successCriteria, list) and len(successCriteria) > 0:
+ try:
+ # Format success criteria as message with "KPI" title
+ criteriaText = "**KPI**\n\n" + "\n".join([f"• {criterion}" for criterion in successCriteria])
+
+ kpiMessageData = {
+ "workflowId": workflow.id,
+ "role": "system",
+ "message": criteriaText,
+ "summary": f"KPI: {len(successCriteria)} success criteria",
+ "status": "step",
+ "sequenceNr": len(workflow.messages) + 1, # After user message
+ "publishedAt": self.services.utils.timestampGetUtc(),
+ "roundNumber": workflow.currentRound,
+ "taskNumber": 0,
+ "actionNumber": 0
+ }
+
+ self.services.chat.storeMessageWithDocuments(workflow, kpiMessageData, [])
+ logger.info(f"Created KPI message with {len(successCriteria)} success criteria after first user message")
+ except Exception as e:
+ logger.error(f"Error creating KPI message: {str(e)}")
+
except Exception as e:
logger.error(f"Error sending first message: {str(e)}")
raise