From 3b53889b7c3e2a4f967dc82d7d2830023b08c4fe Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 20 Oct 2025 01:33:48 +0200
Subject: [PATCH] Centralize AI continuation agents
---
modules/services/serviceAi/mainServiceAi.py | 26 ++++
modules/services/serviceAi/subCoreAi.py | 121 ++++++++-------
.../serviceGeneration/subPromptBuilder.py | 123 +++++++--------
.../mainServiceNormalization.py | 2 +-
.../serviceWorkflow/mainServiceWorkflow.py | 33 ++--
modules/workflows/methods/methodAi.py | 6 +-
modules/workflows/methods/methodOutlook.py | 10 +-
.../processing/adaptive/contentValidator.py | 2 +-
.../processing/adaptive/intentAnalyzer.py | 2 +-
.../workflows/processing/core/taskPlanner.py | 2 +-
.../processing/modes/modeActionplan.py | 4 +-
.../workflows/processing/modes/modeReact.py | 10 +-
.../promptGenerationActionsActionplan.py | 146 +++++++++---------
.../shared/promptGenerationTaskplan.py | 5 +-
modules/workflows/workflowManager.py | 2 +-
15 files changed, 262 insertions(+), 232 deletions(-)
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
index 0c6293c0..7e4af22d 100644
--- a/modules/services/serviceAi/mainServiceAi.py
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -69,6 +69,8 @@ class AiService:
def coreAi(self):
"""Lazy initialization of core AI service."""
if self._coreAi is None:
+ if self.aiObjects is None:
+ raise RuntimeError("AiService.aiObjects must be initialized before accessing coreAi. Use await AiService.create() or await service._ensureAiObjectsInitialized()")
logger.info("Lazy initializing SubCoreAi...")
self._coreAi = SubCoreAi(self.services, self.aiObjects)
return self._coreAi
@@ -153,6 +155,30 @@ class AiService:
await self._ensureAiObjectsInitialized()
return await self.webResearchService.webResearch(request)
+ # Core AI Methods - Delegating to SubCoreAi
+ async def callAiPlanning(
+ self,
+ prompt: str,
+ placeholders: Optional[List[PromptPlaceholder]] = None,
+ options: Optional[AiCallOptions] = None,
+ loopInstruction: Optional[str] = None
+ ) -> str:
+ """Planning AI call for task planning, action planning, action selection, etc."""
+ await self._ensureAiObjectsInitialized()
+ return await self.coreAi.callAiPlanning(prompt, placeholders, options, loopInstruction)
+
+ async def callAiDocuments(
+ self,
+ prompt: str,
+ documents: Optional[List[ChatDocument]] = None,
+ options: Optional[AiCallOptions] = None,
+ outputFormat: Optional[str] = None,
+ title: Optional[str] = None
+ ) -> Union[str, Dict[str, Any]]:
+ """Document generation AI call for all non-planning calls."""
+ await self._ensureAiObjectsInitialized()
+ return await self.coreAi.callAiDocuments(prompt, documents, options, outputFormat, title)
+
def sanitizePromptContent(self, content: str, contentType: str = "text") -> str:
"""
diff --git a/modules/services/serviceAi/subCoreAi.py b/modules/services/serviceAi/subCoreAi.py
index 506f66a1..d602a15f 100644
--- a/modules/services/serviceAi/subCoreAi.py
+++ b/modules/services/serviceAi/subCoreAi.py
@@ -2,7 +2,7 @@ import logging
from typing import Dict, Any, List, Optional, Tuple, Union
from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, ModelCapabilities, OperationType, Priority
-from modules.interfaces.interfaceAiObjects import AiObjects
+from modules.shared.debugLogger import writeDebugFile
logger = logging.getLogger(__name__)
@@ -25,7 +25,8 @@ class SubCoreAi:
self,
prompt: str,
options: AiCallOptions,
- debug_prefix: str = "ai_call"
+ debugPrefix: str = "ai_call",
+ loopInstruction: str = None
) -> str:
"""
Shared core function for AI calls with looping system.
@@ -35,68 +36,85 @@ class SubCoreAi:
Args:
prompt: The prompt to send to AI
options: AI call configuration options
- debug_prefix: Prefix for debug file names
+ debugPrefix: Prefix for debug file names
+ loopInstruction: If provided, replaces LOOP_INSTRUCTION placeholder and includes in continuation prompts
Returns:
Complete AI response after all iterations
"""
- max_iterations = 10 # Prevent infinite loops
+ max_iterations = 100 # Prevent infinite loops
iteration = 0
- accumulated_content = []
+ accumulatedContent = []
- logger.info(f"Starting AI call with looping (debug prefix: {debug_prefix})")
+ logger.debug(f"Starting AI call with looping (debug prefix: {debugPrefix}, loopInstruction: {loopInstruction is not None})")
- # Write initial prompt to debug file
- from modules.shared.debugLogger import writeDebugFile
- writeDebugFile(prompt, f"{debug_prefix}_prompt", None)
+ # Debug logger (writeDebugFile) is imported at module level; no per-iteration import needed
+
+ # Store original prompt to preserve LOOP_INSTRUCTION placeholder
+ originalPrompt = prompt
+
+ # Handle LOOP_INSTRUCTION placeholder replacement for first iteration
+ if loopInstruction and iteration == 0:
+ if "LOOP_INSTRUCTION" not in prompt:
+ raise ValueError("LOOP_INSTRUCTION placeholder not found in prompt when loopInstruction provided")
+ prompt = prompt.replace("LOOP_INSTRUCTION", loopInstruction)
+ logger.debug("Replaced LOOP_INSTRUCTION placeholder with provided instruction")
while iteration < max_iterations:
iteration += 1
- logger.info(f"AI call iteration {iteration}/{max_iterations}")
+ logger.debug(f"AI call iteration {iteration}/{max_iterations}")
# Build iteration prompt
if iteration == 1:
- iteration_prompt = prompt
+ iterationPrompt = prompt
+ elif loopInstruction and iteration > 1:
+ # Only use continuation logic if loopInstruction is provided
+ iterationPrompt = self._buildContinuationPrompt(originalPrompt, accumulatedContent, iteration, loopInstruction)
else:
- iteration_prompt = self._buildContinuationPrompt(prompt, accumulated_content, iteration)
+ # No looping - use original prompt
+ iterationPrompt = prompt
# Make AI call
try:
from modules.datamodels.datamodelAi import AiCallRequest
request = AiCallRequest(
- prompt=iteration_prompt,
+ prompt=iterationPrompt,
context="",
options=options
)
+
+ # Write the ACTUAL prompt sent to AI (including continuation context)
+ writeDebugFile(iterationPrompt, f"{debugPrefix}_prompt_iteration_{iteration}", None)
+
response = await self.aiObjects.call(request)
result = response.content
# Write raw AI response to debug file
- writeDebugFile(result, f"{debug_prefix}_response_iteration_{iteration}", None)
+ writeDebugFile(result, f"{debugPrefix}_response_iteration_{iteration}", None)
# Emit stats for this iteration
self.services.workflow.storeWorkflowStat(
self.services.currentWorkflow,
response,
- f"ai.call.{debug_prefix}.iteration_{iteration}"
+ f"ai.call.{debugPrefix}.iteration_{iteration}"
)
if not result or not result.strip():
logger.warning(f"Iteration {iteration}: Empty response, stopping")
break
- # Check if this is a continuation response
- if "[CONTINUE:" in result:
+ # Check if this is a continuation response (only if loopInstruction is provided)
+ if loopInstruction and "[CONTINUE:" in result:
# Extract the content before the continuation marker
- content_part = result.split("[CONTINUE:")[0].strip()
- if content_part:
- accumulated_content.append(content_part)
- logger.info(f"Iteration {iteration}: Continuation detected, continuing...")
+ contentPart = result.split("[CONTINUE:")[0].strip()
+ if contentPart:
+ accumulatedContent.append(contentPart)
+ logger.debug(f"Iteration {iteration}: Continuation detected, continuing...")
continue
else:
# This is the final response
- accumulated_content.append(result)
- logger.info(f"Iteration {iteration}: Final response received")
+ accumulatedContent.append(result)
+ logger.debug(f"Iteration {iteration}: Final response received")
break
except Exception as e:
@@ -107,19 +125,20 @@ class SubCoreAi:
logger.warning(f"AI call stopped after maximum iterations ({max_iterations})")
# Combine all accumulated content
- final_result = "\n\n".join(accumulated_content) if accumulated_content else ""
+ final_result = "\n\n".join(accumulatedContent) if accumulatedContent else ""
# Write final result to debug file
- writeDebugFile(final_result, f"{debug_prefix}_final_result", None)
+ writeDebugFile(final_result, f"{debugPrefix}_final_result", None)
- logger.info(f"AI call completed: {len(accumulated_content)} parts from {iteration} iterations")
+ logger.info(f"AI call completed: {len(accumulatedContent)} parts from {iteration} iterations")
return final_result
def _buildContinuationPrompt(
self,
base_prompt: str,
- accumulated_content: List[str],
- iteration: int
+ accumulatedContent: List[str],
+ iteration: int,
+ loopInstruction: str = None
) -> str:
"""
Build a prompt for continuation iterations.
@@ -132,11 +151,11 @@ You are continuing from a previous response. Please continue generating content
IMPORTANT:
- Continue from the exact point where you stopped
- Maintain the same format and structure
-- If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]
+- {loopInstruction if loopInstruction else "If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]"}
- Only stop when the response is completely generated
Previous content generated:
-{chr(10).join(accumulated_content[-1:]) if accumulated_content else "None"}
+{chr(10).join(accumulatedContent[-1:]) if accumulatedContent else "None"}
Continue generating content now:
"""
@@ -194,7 +213,8 @@ Continue generating content now:
self,
prompt: str,
placeholders: Optional[List[PromptPlaceholder]] = None,
- options: Optional[AiCallOptions] = None
+ options: Optional[AiCallOptions] = None,
+ loopInstruction: Optional[str] = None
) -> str:
"""
Planning AI call for task planning, action planning, action selection, etc.
@@ -212,13 +232,13 @@ Continue generating content now:
# Build full prompt with placeholders
if placeholders:
- placeholders_dict = {p.key: p.value for p in placeholders}
+ placeholders_dict = {p.label: p.content for p in placeholders}
full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders_dict)
else:
full_prompt = prompt
# Use shared core function with planning-specific debug prefix
- return await self._callAiWithLooping(full_prompt, options, "planning")
+ return await self._callAiWithLooping(full_prompt, options, "planning", loopInstruction=loopInstruction)
# Document Generation AI Call
async def callAiDocuments(
@@ -227,9 +247,7 @@ Continue generating content now:
documents: Optional[List[ChatDocument]] = None,
options: Optional[AiCallOptions] = None,
outputFormat: Optional[str] = None,
- title: Optional[str] = None,
- documentProcessor=None,
- documentGenerator=None
+ title: Optional[str] = None
) -> Union[str, Dict[str, Any]]:
"""
Document generation AI call for all non-planning calls.
@@ -241,8 +259,6 @@ Continue generating content now:
options: AI call configuration options
outputFormat: Optional output format for document generation
title: Optional title for generated documents
- documentProcessor: Document processing service instance
- documentGenerator: Document generation service instance
Returns:
AI response as string, or dict with documents if outputFormat is specified
@@ -251,24 +267,16 @@ Continue generating content now:
options = AiCallOptions()
# Handle document generation with specific output format using unified approach
- if outputFormat and documentGenerator:
+ if outputFormat:
# Use unified generation method for all document generation
if documents and len(documents) > 0:
- # Extract content from documents first
logger.info(f"Extracting content from {len(documents)} documents")
- extracted_content = await documentProcessor.callAiText(prompt, documents, options)
- # Generate with extracted content using shared core function
- generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
- generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation")
+ extracted_content = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
else:
- # Direct generation without documents
logger.info("No documents provided - using direct generation")
- generation_prompt = await self._buildGenerationPrompt(prompt, None, outputFormat, title)
- generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation")
-
- # Write the generated JSON to debug file
- from modules.shared.debugLogger import writeDebugFile
- writeDebugFile(generated_json, "unified_generation_response", documents)
+ extracted_content = None
+ generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
+ generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation", loopInstruction="If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]")
# Parse the generated JSON
try:
@@ -313,7 +321,6 @@ Continue generating content now:
# Log AI response for debugging
try:
- from modules.shared.debugLogger import writeDebugFile
writeDebugFile(str(result), "documentGenerationResponse", documents)
except Exception:
pass
@@ -325,14 +332,14 @@ Continue generating content now:
return {"success": False, "error": f"Rendering failed: {str(e)}"}
# Handle text calls (no output format specified)
- if documents and documentProcessor:
+ if documents:
# Use document processing for text calls with documents
- result = await documentProcessor.callAiText(prompt, documents, options)
+ result = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
else:
# Use shared core function for direct text calls
- result = await self._callAiWithLooping(prompt, options, "text")
+ result = await self._callAiWithLooping(prompt, options, "text", loopInstruction=None)
- return result
+ return result
# AI Image Analysis
@@ -448,7 +455,7 @@ Continue generating content now:
-
+# TODO: review remaining functions in this module for consistency with the centralized continuation-agent API
diff --git a/modules/services/serviceGeneration/subPromptBuilder.py b/modules/services/serviceGeneration/subPromptBuilder.py
index c2ba3c3e..380fa00b 100644
--- a/modules/services/serviceGeneration/subPromptBuilder.py
+++ b/modules/services/serviceGeneration/subPromptBuilder.py
@@ -195,74 +195,69 @@ Consider the user's intent and the most logical way to organize the extracted co
except Exception as e:
services.utils.debugLogToFile(f"Generic prompt analysis failed: {str(e)}", "PROMPT_BUILDER")
- # Fallback to single-file prompt
- example_data = {
- "metadata": {
- "title": "Example Document",
- "author": "AI Assistant",
- "source_documents": ["document_001"],
- "extraction_method": "ai_extraction"
- },
- "sections": [
- {
- "id": "section_001",
- "content_type": "heading",
- "elements": [
- {
- "level": 1,
- "text": "1. SECTION TITLE"
- }
- ],
- "order": 1,
- "metadata": {}
- }
- ],
- "summary": "",
- "tags": []
- }
-
- return f"""
-{userPrompt}
+ # Always use the proper generation prompt template with LOOP_INSTRUCTION
+ result = f"""You are an AI assistant that generates structured JSON content for document creation.
-You are a document processing assistant that extracts and structures content from documents. Your task is to analyze the provided document content and create a structured JSON output.
+USER REQUEST: "{userPrompt}"
+DOCUMENT TITLE: "{title}"
+TARGET FORMAT: {outputFormat}
-TASK: Extract the actual content from the document and organize it into structured sections.
+TASK: Generate JSON content that fulfills the user's request.
-REQUIREMENTS:
-1. Analyze the document content provided in the context below
-2. Extract all content and organize it into logical sections
-3. Create structured JSON with sections containing the extracted content
-4. Preserve the original structure and data
+CRITICAL: You MUST return ONLY valid JSON in this exact structure:
+{{
+ "metadata": {{
+ "title": "{title}",
+ "splitStrategy": "single_document",
+ "source_documents": [],
+ "extraction_method": "ai_generation"
+ }},
+ "documents": [
+ {{
+ "id": "doc_1",
+ "title": "{title}",
+ "filename": "document.{outputFormat}",
+ "sections": [
+ {{
+ "id": "section_1",
+ "content_type": "heading",
+ "elements": [
+ {{
+ "level": 1,
+ "text": "1. SECTION TITLE"
+ }}
+ ],
+ "order": 1
+ }},
+ {{
+ "id": "section_2",
+ "content_type": "paragraph",
+ "elements": [
+ {{
+ "text": "This is the actual content that should be generated."
+ }}
+ ],
+ "order": 2
+ }}
+ ]
+ }}
+ ]
+}}
-OUTPUT FORMAT: Return only valid JSON in this exact structure:
-{json.dumps(example_data, indent=2)}
-
-Requirements:
-- Preserve all original data - do not summarize or interpret
-- Use the exact JSON format shown above
-- Maintain data integrity and structure
-
-Content Types to Extract:
-1. Tables: Extract all rows and columns with proper headers
-2. Lists: Extract all items with proper nesting
-3. Headings: Extract with appropriate levels
-4. Paragraphs: Extract as structured text
-5. Code: Extract code blocks with language identification
-6. Images: Analyze images and describe all visible content including text, tables, logos, graphics, layout, and visual elements
-
-Image Analysis Requirements:
-- If you cannot analyze an image for any reason, explain why in the JSON response
-- Describe everything you see in the image
-- Include all text content, tables, logos, graphics, layout, and visual elements
-- If the image is too small, corrupted, or unclear, explain this
-- Always provide feedback - never return empty responses
-
-Return only the JSON structure with actual data from the documents. Do not include any text before or after the JSON.
-
-Extract the ACTUAL CONTENT from the source documents. Do not use placeholder text like "Section 1", "Section 2", etc. Extract the real headings, paragraphs, and content from the documents.
-
-DO NOT return a schema description - return actual extracted content in the JSON format shown above.
+IMPORTANT:
+- Return ONLY the JSON structure above
+- Do NOT include any text before or after the JSON
+- Fill in the actual content based on the user request: {userPrompt}
+- If the content is too large, you can split it into multiple sections
+- Each section should have a unique id and appropriate content_type
+- LOOP_INSTRUCTION
"""
+
+ # Debug output
+ if services:
+ services.utils.debugLogToFile(f"GENERATION PROMPT: Generated successfully", "PROMPT_BUILDER")
+
+ return result.strip()
async def buildExtractionPrompt(
outputFormat: str,
@@ -499,6 +494,8 @@ IMPORTANT:
- Fill in the actual content based on the user request: {safeUserPrompt}
- If the content is too large, you can split it into multiple sections
- Each section should have a unique id and appropriate content_type
+
+LOOP_INSTRUCTION
"""
# Debug output
diff --git a/modules/services/serviceNormalization/mainServiceNormalization.py b/modules/services/serviceNormalization/mainServiceNormalization.py
index 4dfbf9cb..2a932723 100644
--- a/modules/services/serviceNormalization/mainServiceNormalization.py
+++ b/modules/services/serviceNormalization/mainServiceNormalization.py
@@ -90,7 +90,7 @@ class NormalizationService:
" \"Date\": {\"formats\": [\"DD.MM.YYYY\",\"YYYY-MM-DD\"]}\n }\n}\n"
)
- response = await self.services.ai.coreAi.callAiPlanning(prompt=prompt, placeholders=None, options=None)
+ response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None, options=None)
if not response:
return {"mapping": {}, "normalizationPolicy": {}}
diff --git a/modules/services/serviceWorkflow/mainServiceWorkflow.py b/modules/services/serviceWorkflow/mainServiceWorkflow.py
index 1edafafa..1e7b9ae1 100644
--- a/modules/services/serviceWorkflow/mainServiceWorkflow.py
+++ b/modules/services/serviceWorkflow/mainServiceWorkflow.py
@@ -7,7 +7,6 @@ from modules.datamodels.datamodelChat import ChatContentExtracted
from modules.services.serviceExtraction.mainServiceExtraction import ExtractionService
from modules.services.serviceGeneration.subDocumentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
from modules.shared.timezoneUtils import get_utc_timestamp
-from modules.services.serviceAi.mainServiceAi import AiService
from modules.security.tokenManager import TokenManager
from modules.shared.progressLogger import ProgressLogger
@@ -43,23 +42,25 @@ class WorkflowService:
break
# Create prompt for AI
- prompt = f"""You are an AI assistant providing a summary of a chat conversation.
- Please respond in '{self.user.language}' language.
+ prompt = f"""
+You are an AI assistant providing a summary of a chat conversation.
+Please respond in '{self.user.language}' language.
- Chat History:
- {chr(10).join(f"- {msg.message}" for msg in reversed(relevantMessages))}
+Chat History:
+{chr(10).join(f"- {msg.message}" for msg in reversed(relevantMessages))}
- Instructions:
- 1. Summarize the conversation's key points and outcomes
- 2. Be concise but informative
- 3. Use a professional but friendly tone
- 4. Focus on important decisions and next steps if any
+Instructions:
+1. Summarize the conversation's key points and outcomes
+2. Be concise but informative
+3. Use a professional but friendly tone
+4. Focus on important decisions and next steps if any
- Please provide a comprehensive summary of this conversation."""
+LOOP_INSTRUCTION
+
+Please provide a comprehensive summary of this conversation."""
- # Get summary using AI service directly (avoiding circular dependency)
- ai_service = AiService(self)
- return await ai_service.coreAi.callAiDocuments(
+ # Get summary using AI service through proper main service interface
+ return await self.services.ai.callAiDocuments(
prompt=prompt,
documents=None,
options={
@@ -69,9 +70,7 @@ class WorkflowService:
"compress_prompt": True,
"compress_documents": False,
"max_cost": 0.01
- },
- documentProcessor=ai_service.documentProcessor,
- documentGenerator=ai_service.documentGenerator
+ }
)
except Exception as e:
diff --git a/modules/workflows/methods/methodAi.py b/modules/workflows/methods/methodAi.py
index e10f7fe9..b2c7e526 100644
--- a/modules/workflows/methods/methodAi.py
+++ b/modules/workflows/methods/methodAi.py
@@ -127,13 +127,11 @@ class MethodAi(MethodBase):
# Update progress - calling AI
progressLogger.updateProgress(operationId, 0.6, "Calling AI")
- result = await self.services.ai.coreAi.callAiDocuments(
+ result = await self.services.ai.callAiDocuments(
prompt=aiPrompt, # Use original prompt, let unified generation handle prompt building
documents=chatDocuments if chatDocuments else None,
options=options,
- outputFormat=output_format,
- documentProcessor=self.services.ai.documentProcessor,
- documentGenerator=self.services.ai.documentGenerator
+ outputFormat=output_format
)
# Update progress - processing result
diff --git a/modules/workflows/methods/methodOutlook.py b/modules/workflows/methods/methodOutlook.py
index a4949a0d..ef9ee6f0 100644
--- a/modules/workflows/methods/methodOutlook.py
+++ b/modules/workflows/methods/methodOutlook.py
@@ -1182,11 +1182,13 @@ Return JSON:
"subject": "subject line",
"body": "email body (HTML allowed)",
"attachments": ["doc_ref1", "doc_ref2"]
-}}"""
+}}
+
+LOOP_INSTRUCTION"""
# Call AI service to generate email content
try:
- ai_response = await self.services.ai.coreAi.callAiDocuments(
+ ai_response = await self.services.ai.callAiDocuments(
prompt=ai_prompt,
documents=chatDocuments,
options=AiCallOptions(
@@ -1199,9 +1201,7 @@ Return JSON:
resultFormat="json",
maxCost=0.50,
maxProcessingTime=30
- ),
- documentProcessor=self.services.ai.documentProcessor,
- documentGenerator=self.services.ai.documentGenerator
+ )
)
# Parse AI response
diff --git a/modules/workflows/processing/adaptive/contentValidator.py b/modules/workflows/processing/adaptive/contentValidator.py
index 156dd2c9..2a9b5d83 100644
--- a/modules/workflows/processing/adaptive/contentValidator.py
+++ b/modules/workflows/processing/adaptive/contentValidator.py
@@ -120,7 +120,7 @@ DELIVERED CONTENT TO CHECK:
request_options = AiCallOptions()
request_options.operationType = OperationType.GENERAL
- response = await self.services.ai.coreAi.callAiPlanning(
+ response = await self.services.ai.callAiPlanning(
prompt=validationPrompt,
placeholders=None,
options=request_options
diff --git a/modules/workflows/processing/adaptive/intentAnalyzer.py b/modules/workflows/processing/adaptive/intentAnalyzer.py
index 74283629..bbe78651 100644
--- a/modules/workflows/processing/adaptive/intentAnalyzer.py
+++ b/modules/workflows/processing/adaptive/intentAnalyzer.py
@@ -63,7 +63,7 @@ CRITICAL: Respond with ONLY the JSON object below. Do not include any explanator
request_options = AiCallOptions()
request_options.operationType = OperationType.GENERAL
- response = await self.services.ai.coreAi.callAiPlanning(
+ response = await self.services.ai.callAiPlanning(
prompt=analysisPrompt,
placeholders=None,
options=request_options
diff --git a/modules/workflows/processing/core/taskPlanner.py b/modules/workflows/processing/core/taskPlanner.py
index 361c86b2..2b724c65 100644
--- a/modules/workflows/processing/core/taskPlanner.py
+++ b/modules/workflows/processing/core/taskPlanner.py
@@ -105,7 +105,7 @@ class TaskPlanner:
maxProcessingTime=30
)
- prompt = await self.services.ai.coreAi.callAiPlanning(
+ prompt = await self.services.ai.callAiPlanning(
prompt=taskPlanningPromptTemplate,
placeholders=placeholders,
options=options
diff --git a/modules/workflows/processing/modes/modeActionplan.py b/modules/workflows/processing/modes/modeActionplan.py
index 0e11ac88..e3dc977c 100644
--- a/modules/workflows/processing/modes/modeActionplan.py
+++ b/modules/workflows/processing/modes/modeActionplan.py
@@ -137,7 +137,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
- prompt = await self.services.ai.coreAi.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
+ prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
# Check if AI response is valid
if not prompt:
@@ -476,7 +476,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
- response = await self.services.ai.coreAi.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)
+ response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)
# Log result review response received
logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
diff --git a/modules/workflows/processing/modes/modeReact.py b/modules/workflows/processing/modes/modeReact.py
index 405f530e..84d38a45 100644
--- a/modules/workflows/processing/modes/modeReact.py
+++ b/modules/workflows/processing/modes/modeReact.py
@@ -201,7 +201,7 @@ class ReactMode(BaseMode):
maxProcessingTime=30
)
- response = await self.services.ai.coreAi.callAiPlanning(
+ response = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
options=options
@@ -313,7 +313,7 @@ class ReactMode(BaseMode):
resultFormat="json" # Explicitly request JSON format
)
- paramsResp = await self.services.ai.coreAi.callAiPlanning(
+ paramsResp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
options=options
@@ -625,7 +625,7 @@ class ReactMode(BaseMode):
maxProcessingTime=30
)
- resp = await self.services.ai.coreAi.callAiPlanning(
+ resp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
options=options
@@ -719,7 +719,7 @@ User language: {userLanguage}
Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly message
- response = await self.services.ai.coreAi.callAiPlanning(
+ response = await self.services.ai.callAiPlanning(
prompt=prompt,
placeholders=None,
options=AiCallOptions(
@@ -760,7 +760,7 @@ Result context: {resultContext}
Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly result message
- response = await self.services.ai.coreAi.callAiPlanning(
+ response = await self.services.ai.callAiPlanning(
prompt=prompt,
placeholders=None,
options=AiCallOptions(
diff --git a/modules/workflows/processing/shared/promptGenerationActionsActionplan.py b/modules/workflows/processing/shared/promptGenerationActionsActionplan.py
index fc44be85..9cbef765 100644
--- a/modules/workflows/processing/shared/promptGenerationActionsActionplan.py
+++ b/modules/workflows/processing/shared/promptGenerationActionsActionplan.py
@@ -32,98 +32,100 @@ def generateActionDefinitionPrompt(services, context: Any) -> PromptBundle:
template = """# Action Definition
- Generate the next action to advance toward completing the task objective.
+Generate the next action to advance toward completing the task objective.
- ## 📋 Context
+## 📋 Context
- ### User Language
- {{KEY:USER_LANGUAGE}}
+### User Language
+{{KEY:USER_LANGUAGE}}
- ### Task Objective
- {{KEY:USER_PROMPT}}
+### Task Objective
+{{KEY:USER_PROMPT}}
- ### Available Documents
- {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
+### Available Documents
+{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
- ### Available Connections
- {{KEY:AVAILABLE_CONNECTIONS_INDEX}}
-
- ### Workflow History
- {{KEY:WORKFLOW_HISTORY}}
+### Available Connections
+{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
- ### Available Methods
- {{KEY:AVAILABLE_METHODS}}
+### Workflow History
+{{KEY:WORKFLOW_HISTORY}}
- ## ⚠️ RULES
+### Available Methods
+{{KEY:AVAILABLE_METHODS}}
- ### Action Names
- - **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- - **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
- - **DO NOT separate** method and action names - use the full compound name
+## ⚠️ RULES
- ### Parameter Guidelines
- - **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
- - **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
- - **Include user language** if relevant
- - **Avoid unnecessary fields** - host applies defaults
+### Action Names
+- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
+- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
+- **DO NOT separate** method and action names - use the full compound name
- ## 📊 Required JSON Structure
+### Parameter Guidelines
+- **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
+- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
+- **Include user language** if relevant
+- **Avoid unnecessary fields** - host applies defaults
- ```json
- {
- "actions": [
- {
- "action": "method.action_name",
- "parameters": {},
- "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
- "description": "What this action accomplishes",
- "userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
- }
- ]
- }
- ```
+## 📊 Required JSON Structure
- ## ✅ Correct Example
+```json
+{
+ "actions": [
+ {
+ "action": "method.action_name",
+ "parameters": {},
+ "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
+ "description": "What this action accomplishes",
+ "userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
+ }
+ ]
+}
+```
- ```json
- {
- "actions": [
- {
- "action": "document.extract",
- "parameters": {"documentList": ["docList:msg_123:results"]},
- "resultLabel": "round1_task1_action1_extract_results",
- "description": "Extract data from documents",
- "userMessage": "Extracting data from documents"
- }
- ]
- }
- ```
+## ✅ Correct Example
+
+```json
+{
+ "actions": [
+ {
+ "action": "document.extract",
+ "parameters": {"documentList": ["docList:msg_123:results"]},
+ "resultLabel": "round1_task1_action1_extract_results",
+ "description": "Extract data from documents",
+ "userMessage": "Extracting data from documents"
+ }
+ ]
+}
+```
- ## 🎯 Action Planning Guidelines
+## 🎯 Action Planning Guidelines
- ### Method Selection
- - **Choose appropriate method** based on task requirements
- - **Consider available resources** (documents, connections)
- - **Match method capabilities** to task objectives
+### Method Selection
+- **Choose appropriate method** based on task requirements
+- **Consider available resources** (documents, connections)
+- **Match method capabilities** to task objectives
- ### Parameter Design
- - **Use ACTION SIGNATURE** to understand required parameters
- - **Convert objective** into appropriate parameter values
- - **Include all required parameters** for the action
+### Parameter Design
+- **Use ACTION SIGNATURE** to understand required parameters
+- **Convert objective** into appropriate parameter values
+- **Include all required parameters** for the action
- ### Result Labeling
- - **Use descriptive labels** that explain what the action produces
- - **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
- - **Make labels meaningful** for future reference
+### Result Labeling
+- **Use descriptive labels** that explain what the action produces
+- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
+- **Make labels meaningful** for future reference
- ### User Messages
- - **Write in user language:** '{{KEY:USER_LANGUAGE}}'
- - **Explain what's happening** in user-friendly terms
- - **Keep messages concise** but informative
+### User Messages
+- **Write in user language:** '{{KEY:USER_LANGUAGE}}'
+- **Explain what's happening** in user-friendly terms
+- **Keep messages concise** but informative
- ## 🚀 Response Format
- Return ONLY the JSON object."""
+## 🚀 Response Format
+Return ONLY the JSON object with complete action objects. If you cannot complete the full response, ensure each action object is complete and valid.
+LOOP_INSTRUCTION
+"""
return PromptBundle(prompt=template, placeholders=placeholders)
diff --git a/modules/workflows/processing/shared/promptGenerationTaskplan.py b/modules/workflows/processing/shared/promptGenerationTaskplan.py
index e8d1ca77..f5f09960 100644
--- a/modules/workflows/processing/shared/promptGenerationTaskplan.py
+++ b/modules/workflows/processing/shared/promptGenerationTaskplan.py
@@ -129,6 +129,7 @@ Break down user requests into logical, executable task steps.
- **High**: Complex strategic tasks (6+ actions)
## 🚀 Response Format
-Return ONLY the JSON object."""
-
+Return ONLY the JSON object with complete task objects. If you cannot complete the full response, ensure each task object is complete and valid.
+LOOP_INSTRUCTION
+"""
return PromptBundle(prompt=template, placeholders=placeholders)
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index 1dfebd77..f5f65060 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -220,7 +220,7 @@ class WorkflowManager:
)
# Call AI analyzer
- aiResponse = await self.services.ai.coreAi.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
+ aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
detectedLanguage = None
normalizedRequest = None