cleanup prompts and placeholders

This commit is contained in:
ValueOn AG 2025-10-05 15:01:22 +02:00
parent 9b8510bfd0
commit b44c46be23
13 changed files with 461 additions and 688 deletions

View file

@ -409,3 +409,38 @@ register_model_labels(
# Resolve forward references
from modules.datamodels.datamodelChat import ChatWorkflow
TaskContext.update_forward_refs()
# -----------------------------
# Prompt helper datamodels
# -----------------------------
class PromptPlaceholder(BaseModel, ModelMixin):
    """One named placeholder to be substituted into a prompt template."""
    label: str
    content: str
    # When True the host is allowed to shrink the content before the AI call.
    summaryAllowed: bool = Field(default=False, description="Whether host may summarize content before sending to AI")


register_model_labels(
    "PromptPlaceholder",
    {"en": "Prompt Placeholder", "fr": "Espace réservé d'invite"},
    {
        "label": {"en": "Label", "fr": "Libellé"},
        "content": {"en": "Content", "fr": "Contenu"},
        "summaryAllowed": {"en": "Summary Allowed", "fr": "Résumé autorisé"},
    },
)
class PromptBundle(BaseModel, ModelMixin):
    """A prompt template together with the placeholders that fill it."""
    prompt: str
    # Placeholders default to an empty list so a bare template is valid.
    placeholders: List[PromptPlaceholder] = Field(default_factory=list)


register_model_labels(
    "PromptBundle",
    {"en": "Prompt Bundle", "fr": "Lot d'invite"},
    {
        "prompt": {"en": "Prompt", "fr": "Invite"},
        "placeholders": {"en": "Placeholders", "fr": "Espaces réservés"},
    },
)

View file

@ -41,7 +41,8 @@ class Services:
def __init__(self, user: User, workflow: ChatWorkflow = None):
self.user: User = user
self.workflow: ChatWorkflow = workflow
self.currentUserPrompt: str = "" # Store the current user prompt for easy access
self.currentUserPrompt: str = "" # Cleaned/normalized user intent for the current round
self.rawUserPrompt: str = "" # Original raw user message for the current round
# Initialize interfaces

View file

@ -1,5 +1,6 @@
import logging
from typing import Dict, Any, List, Optional, Tuple, Union
from modules.datamodels.datamodelWorkflow import PromptPlaceholder
from modules.datamodels.datamodelChat import ChatDocument
from modules.services.serviceExtraction.mainServiceExtraction import ExtractionService
@ -704,7 +705,7 @@ class AiService:
self,
prompt: str,
documents: Optional[List[ChatDocument]] = None,
placeholders: Optional[Dict[str, str]] = None,
placeholders: Optional[List[PromptPlaceholder]] = None,
options: Optional[AiCallOptions] = None
) -> str:
"""
@ -713,7 +714,7 @@ class AiService:
Args:
prompt: The main prompt for the AI call
documents: Optional list of documents to process
placeholders: Optional dictionary of placeholder replacements for planning calls
placeholders: Optional list of placeholder replacements for planning calls
options: AI call configuration options
Returns:
@ -728,12 +729,19 @@ class AiService:
if options is None:
options = AiCallOptions()
# Normalize placeholders from List[PromptPlaceholder]
placeholders_dict: Dict[str, str] = {}
placeholders_meta: Dict[str, bool] = {}
if placeholders:
placeholders_dict = {p.label: p.content for p in placeholders}
placeholders_meta = {p.label: bool(getattr(p, 'summaryAllowed', False)) for p in placeholders}
# Auto-determine call type based on documents and operation type
call_type = self._determineCallType(documents, options.operationType)
options.callType = call_type
if call_type == "planning":
return await self._callAiPlanning(prompt, placeholders, options)
return await self._callAiPlanning(prompt, placeholders_dict, placeholders_meta, options)
else:
# Set processDocumentsIndividually from the legacy parameter if not set in options
if options.processDocumentsIndividually is None and documents:
@ -758,6 +766,7 @@ class AiService:
self,
prompt: str,
placeholders: Optional[Dict[str, str]],
placeholdersMeta: Optional[Dict[str, bool]],
options: AiCallOptions
) -> str:
"""
@ -766,8 +775,67 @@ class AiService:
# Ensure aiObjects is initialized
await self._ensureAiObjectsInitialized()
# Build full prompt with placeholders
full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders)
# Build full prompt with placeholders; if too large, summarize summaryAllowed placeholders proportionally
effective_placeholders = placeholders or {}
full_prompt = self._buildPromptWithPlaceholders(prompt, effective_placeholders)
if options.compressPrompt and placeholdersMeta:
# Determine model capacity
try:
caps = self._getModelCapabilitiesForContent(full_prompt, None, options)
max_bytes = caps.get("maxContextBytes", len(full_prompt.encode("utf-8")))
except Exception:
max_bytes = len(full_prompt.encode("utf-8"))
current_bytes = len(full_prompt.encode("utf-8"))
if current_bytes > max_bytes:
# Compute total bytes contributed by allowed placeholders (approximate by content length)
allowed_labels = [l for l, allow in placeholdersMeta.items() if allow]
allowed_sizes = {l: len((effective_placeholders.get(l) or "").encode("utf-8")) for l in allowed_labels}
total_allowed = sum(allowed_sizes.values())
overage = current_bytes - max_bytes
if total_allowed > 0 and overage > 0:
# Target total for allowed after reduction
target_allowed = max(total_allowed - overage, 0)
# Global ratio to apply across allowed placeholders
ratio = target_allowed / total_allowed if total_allowed > 0 else 1.0
ratio = max(0.0, min(1.0, ratio))
reduced: Dict[str, str] = {}
for label, content in effective_placeholders.items():
if label in allowed_labels and isinstance(content, str) and len(content) > 0:
old_len = len(content)
# Reduce by proportional ratio on characters (fallback if empty)
reduction_factor = ratio if old_len > 0 else 1.0
reduced[label] = self._reduceText(content, reduction_factor)
else:
reduced[label] = content
effective_placeholders = reduced
full_prompt = self._buildPromptWithPlaceholders(prompt, effective_placeholders)
# If still slightly over, perform a second-pass fine adjustment with updated ratio
current_bytes = len(full_prompt.encode("utf-8"))
if current_bytes > max_bytes and total_allowed > 0:
overage2 = current_bytes - max_bytes
# Recompute allowed sizes after first reduction
allowed_sizes2 = {l: len((effective_placeholders.get(l) or "").encode("utf-8")) for l in allowed_labels}
total_allowed2 = sum(allowed_sizes2.values())
if total_allowed2 > 0 and overage2 > 0:
target_allowed2 = max(total_allowed2 - overage2, 0)
ratio2 = target_allowed2 / total_allowed2
ratio2 = max(0.0, min(1.0, ratio2))
reduced2: Dict[str, str] = {}
for label, content in effective_placeholders.items():
if label in allowed_labels and isinstance(content, str) and len(content) > 0:
old_len = len(content)
reduction_factor = ratio2 if old_len > 0 else 1.0
reduced2[label] = self._reduceText(content, reduction_factor)
else:
reduced2[label] = content
effective_placeholders = reduced2
full_prompt = self._buildPromptWithPlaceholders(prompt, effective_placeholders)
# Make AI call using AiObjects (let it handle model selection)

View file

@ -303,7 +303,7 @@ class MethodOutlook(MethodBase):
WORKFLOW POSITION: Use for email analysis, before composing responses
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
folder (str, optional): Email folder to read from (default: "Inbox")
limit (int, optional): Maximum number of emails to read (default: 10)
filter (str, optional): Filter criteria for emails. Supports: Email address (e.g., "user@domain.com") - filters by sender, Search queries (e.g., "from:user@domain.com", "subject:meeting"), Text content (e.g., "project update") - searches in subject
@ -462,7 +462,7 @@ class MethodOutlook(MethodBase):
WORKFLOW POSITION: Use for finding specific emails, before reading or responding
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
query (str): Search query
folder (str, optional): Folder to search in (default: "All")
limit (int, optional): Maximum number of results (default: 20)
@ -669,7 +669,7 @@ class MethodOutlook(MethodBase):
List email drafts in Outlook
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
folder (str, optional): Folder to search for drafts (default: "Drafts")
limit (int, optional): Maximum number of drafts to list (default: 20)
expectedDocumentFormats (list, optional): Expected document formats with extension, mimeType, description
@ -792,7 +792,7 @@ class MethodOutlook(MethodBase):
Find email drafts across all folders in Outlook
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
limit (int, optional): Maximum number of drafts to find (default: 50)
expectedDocumentFormats (list, optional): Expected document formats with extension, mimeType, description
"""
@ -929,7 +929,7 @@ class MethodOutlook(MethodBase):
Check the contents of the Drafts folder directly
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
limit (int, optional): Maximum number of drafts to check (default: 20)
expectedDocumentFormats (list, optional): Expected document formats with extension, mimeType, description
"""
@ -1052,7 +1052,7 @@ class MethodOutlook(MethodBase):
WORKFLOW POSITION: Use when you have complete email information ready to send
Parameters:
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
to (List[str]): REQUIRED - Email recipient addresses
subject (str): REQUIRED - Email subject line
body (str): REQUIRED - Email body content
@ -1216,7 +1216,7 @@ class MethodOutlook(MethodBase):
WORKFLOW POSITION: Use when you need AI to generate email content from available information
Parameters:
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
to (List[str]): REQUIRED - Email recipient addresses
context (str): REQUIRED - Context information for email composition
documentList (List[str], optional): Document references to include as context and attachments
@ -1460,7 +1460,7 @@ Make sure the email is:
Check if the current Microsoft connection has the necessary permissions for Outlook operations.
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list) to check
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list) to check
"""
try:
connectionReference = parameters.get("connectionReference")

View file

@ -454,7 +454,7 @@ class MethodSharepoint(MethodBase):
WORKFLOW POSITION: Use first to locate documents, before readDocuments or uploadDocument
Parameters:
connectionReference (str): Microsoft connection reference (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Microsoft connection reference (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
site (str, optional): Site hint (e.g., "SSS", "KM XYZ")
searchQuery (str): Search query - "budget", "folders:alpha", "files:budget", "/Documents/Project1", "namepart1 namepart2 namepart3". Use "folders:" prefix when user wants to store files or find folders
maxResults (int, optional): Max results (default: 100)
@ -834,7 +834,7 @@ class MethodSharepoint(MethodBase):
Parameters:
documentList (list): Reference(s) to the document list to read
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
pathObject (str, optional): Path object to locate documents. This can ONLY be a reference to a result from sharepoint.findDocumentPath action
pathQuery (str, optional): Path query to locate documents, only if no pathObject is provided (e.g., "/Documents/Project1", "*" for all sites)
includeMetadata (bool, optional): Whether to include metadata (default: True)
@ -1117,7 +1117,7 @@ class MethodSharepoint(MethodBase):
WORKFLOW POSITION: Use after document generation, as final storage step
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
pathObject (str, optional): Path object to locate documents. This can ONLY be a reference to a result from sharepoint.findDocumentPath action
pathQuery (str, optional): Path query to locate documents, only if no pathObject is provided (e.g., "/Documents/Project1", "*" for all sites)
documentList (list): Reference(s) to the document list to upload
@ -1477,7 +1477,7 @@ class MethodSharepoint(MethodBase):
WORKFLOW POSITION: Use for exploring SharePoint content, before findDocumentPath
Parameters:
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
connectionReference (str): Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS_INDEX list)
pathObject (str, optional): Path object to locate documents. This can ONLY be a reference to a result from sharepoint.findDocumentPath action
pathQuery (str, optional): Path query to locate documents, only if no pathObject is provided (e.g., "/Documents/Project1", "*" for all sites)
includeSubfolders (bool, optional): Whether to include subfolders (default: False)

View file

@ -14,17 +14,8 @@ from modules.datamodels.datamodelAi import AiCallOptions, OperationType, Process
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.shared.executionState import TaskExecutionState
from modules.workflows.processing.shared.promptGenerationActionsActionplan import (
createActionDefinitionPromptTemplate,
createResultReviewPromptTemplate
)
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractAvailableDocuments,
extractWorkflowHistory,
extractAvailableMethods,
extractUserLanguage,
extractAvailableConnections,
extractReviewContent
generateActionDefinitionPrompt,
generateResultReviewPrompt
)
logger = logging.getLogger(__name__)
@ -126,29 +117,10 @@ class ActionplanMode(BaseMode):
# Check workflow status before calling AI service
self._checkWorkflowStopped(workflow)
# Generate the action definition prompt with placeholders
actionPromptTemplate = createActionDefinitionPromptTemplate()
# Extract content for placeholders
userPrompt = extractUserPrompt(actionContext)
# Populate context.available_documents with formatted document string (like old system)
actionContext.available_documents = self.services.workflow.getAvailableDocuments(workflow)
availableDocuments = extractAvailableDocuments(actionContext)
workflowHistory = extractWorkflowHistory(self.services, actionContext)
availableMethods = extractAvailableMethods(self.services)
userLanguage = extractUserLanguage(self.services)
# Action planner also needs connections for parameter generation (like old system)
availableConnectionsStr = extractAvailableConnections(self.services)
# Create placeholders dictionary
placeholders = {
"USER_PROMPT": userPrompt,
"AVAILABLE_DOCUMENTS": availableDocuments,
"WORKFLOW_HISTORY": workflowHistory,
"AVAILABLE_METHODS": availableMethods,
"USER_LANGUAGE": userLanguage,
"AVAILABLE_CONNECTIONS": availableConnectionsStr
}
# Build prompt bundle (template + placeholders)
bundle = generateActionDefinitionPrompt(self.services, actionContext)
actionPromptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Trace action planning prompt
self._writeTraceLog("Action Plan Prompt", actionPromptTemplate)
@ -165,11 +137,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
prompt = await self.services.ai.callAi(
prompt=actionPromptTemplate,
placeholders=placeholders,
options=options
)
prompt = await self.services.ai.callAi(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
# Check if AI response is valid
if not prompt:
@ -485,18 +453,10 @@ class ActionplanMode(BaseMode):
# Check workflow status before calling AI service
self._checkWorkflowStopped(workflow)
# Use placeholder-based review prompt
promptTemplate = createResultReviewPromptTemplate()
# Extract content for placeholders
userPrompt = extractUserPrompt(reviewContext)
reviewContent = extractReviewContent(reviewContext)
# Create placeholders dictionary
placeholders = {
"USER_PROMPT": userPrompt,
"REVIEW_CONTENT": reviewContent
}
# Build prompt bundle for result review
bundle = generateResultReviewPrompt(reviewContext)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Log result review prompt sent to AI
logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
@ -518,11 +478,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
response = await self.services.ai.callAi(
prompt=promptTemplate,
placeholders=placeholders,
options=options
)
response = await self.services.ai.callAi(prompt=promptTemplate, placeholders=placeholders, options=options)
# Log result review response received
logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")

View file

@ -15,15 +15,12 @@ from modules.datamodels.datamodelChat import ChatWorkflow
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.shared.executionState import TaskExecutionState, shouldContinue
from modules.workflows.processing.shared.placeholderFactoryReactOnly import (
ContextAwarePlaceholders,
WorkflowPhase
)
from modules.workflows.processing.shared.promptGenerationActionsReact import (
createReactPlanSelectionPromptTemplate,
createReactParametersPromptTemplate,
createReactRefinementPromptTemplate
generateReactPlanSelectionPrompt,
generateReactParametersPrompt,
generateReactRefinementPrompt
)
from modules.workflows.processing.shared.placeholderFactory import extractReviewContent
from modules.workflows.processing.adaptive import IntentAnalyzer, ContentValidator, LearningEngine, ProgressTracker
logger = logging.getLogger(__name__)
@ -39,8 +36,7 @@ class ReactMode(BaseMode):
self.learningEngine = LearningEngine()
self.progressTracker = ProgressTracker()
self.currentIntent = None
# Initialize context-aware placeholder service
self.placeholderService = ContextAwarePlaceholders(services)
# Placeholder service no longer used; prompts are generated directly
async def generateTaskActions(self, taskStep: TaskStep, workflow: ChatWorkflow,
previousResults: List = None, enhancedContext: TaskContext = None) -> List[TaskAction]:
@ -176,13 +172,9 @@ class ReactMode(BaseMode):
async def _planSelect(self, context: TaskContext) -> Dict[str, Any]:
"""Plan: select exactly one action. Returns {"action": {method, name}}"""
promptTemplate = createReactPlanSelectionPromptTemplate()
# Use context-aware placeholders for React plan selection (minimal context)
placeholders = await self.placeholderService.getPlaceholders(
WorkflowPhase.REACT_PLAN_SELECTION,
context
)
bundle = generateReactPlanSelectionPrompt(self.services, context)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
self._writeTraceLog("React Plan Selection Prompt", promptTemplate)
self._writeTraceLog("React Plan Selection Placeholders", placeholders)
@ -230,24 +222,9 @@ class ReactMode(BaseMode):
parameters = selection['parameters']
else:
logger.info("No parameters in action selection, requesting from AI")
promptTemplate = createReactParametersPromptTemplate()
# Get action parameter description (not function signature)
from modules.workflows.processing.shared.methodDiscovery import methods, getActionParameterSignature
actionParameters = getActionParameterSignature(methodName, actionName, methods)
selectedAction = compoundActionName
# Use context-aware placeholders for React parameters (full context)
additional_data = {
"SELECTED_ACTION": selectedAction,
"ACTION_SIGNATURE": actionParameters
}
placeholders = await self.placeholderService.getPlaceholders(
WorkflowPhase.REACT_PARAMETERS,
context,
additional_data
)
bundle = generateReactParametersPrompt(self.services, context, compoundActionName)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
self._writeTraceLog("React Parameters Prompt", promptTemplate)
self._writeTraceLog("React Parameters Placeholders", placeholders)
@ -485,8 +462,6 @@ class ReactMode(BaseMode):
async def _refineDecide(self, context: TaskContext, observation: Dict[str, Any]) -> Dict[str, Any]:
"""Refine: decide continue or stop, with reason"""
promptTemplate = createReactRefinementPromptTemplate()
# Create proper ReviewContext for extractReviewContent
from modules.datamodels.datamodelWorkflow import ReviewContext
reviewContext = ReviewContext(
@ -497,14 +472,9 @@ class ReactMode(BaseMode):
workflow_id=context.workflow_id,
previous_results=[]
)
# Use context-aware placeholders for React refinement
additional_data = {"REVIEW_CONTENT": self.placeholderService._getReviewContent(reviewContext)}
placeholders = await self.placeholderService.getPlaceholders(
WorkflowPhase.RESULT_REVIEW,
context,
additional_data
)
baseReviewContent = extractReviewContent(reviewContext)
placeholders = {"REVIEW_CONTENT": baseReviewContent}
# NEW: Add content validation to review content
enhancedReviewContent = placeholders.get("REVIEW_CONTENT", "")
@ -538,9 +508,13 @@ class ReactMode(BaseMode):
# Update placeholders with enhanced review content
placeholders["REVIEW_CONTENT"] = enhancedReviewContent
bundle = generateReactRefinementPrompt(self.services, context, enhancedReviewContent)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
self._writeTraceLog("React Refinement Prompt", promptTemplate)
self._writeTraceLog("React Refinement Placeholders", placeholders)
# Centralized AI call for refinement decision (balanced analysis)
options = AiCallOptions(
operationType=OperationType.ANALYSE_CONTENT,

View file

@ -8,21 +8,24 @@ NAMING CONVENTION:
- Placeholder names are in UPPER_CASE with underscores
- Function names are in camelCase
MAPPING TABLE:
{{KEY:USER_PROMPT}} -> extractUserPrompt()
{{KEY:AVAILABLE_DOCUMENTS}} -> extractAvailableDocuments()
{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory()
{{KEY:AVAILABLE_METHODS}} -> extractAvailableMethods()
{{KEY:AVAILABLE_CONNECTIONS}} -> extractAvailableConnections()
{{KEY:USER_LANGUAGE}} -> extractUserLanguage()
{{KEY:REVIEW_CONTENT}} -> extractReviewContent()
{{KEY:ACTION_OBJECTIVE}} -> extractActionObjective()
{{KEY:PREVIOUS_ACTION_RESULTS}} -> extractPreviousActionResults()
{{KEY:LEARNINGS_AND_IMPROVEMENTS}} -> extractLearningsAndImprovements()
{{KEY:LATEST_REFINEMENT_FEEDBACK}} -> extractLatestRefinementFeedback()
{{KEY:SELECTED_ACTION}} -> extractSelectedAction()
{{KEY:ACTION_SIGNATURE}} -> extractActionSignature()
{{KEY:ENHANCED_DOCUMENTS}} -> extractEnhancedDocumentContext()
MAPPING TABLE (keys function) with usage [global | react | actionplan]:
{{KEY:USER_PROMPT}} -> extractUserPrompt() [global, react, actionplan]
{{KEY:USER_LANGUAGE}} -> extractUserLanguage() [react, actionplan]
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}} -> extractAvailableDocumentsSummary() [react, actionplan]
{{KEY:AVAILABLE_DOCUMENTS_INDEX}} -> extractAvailableDocumentsIndex() [react]
{{KEY:AVAILABLE_CONNECTIONS_INDEX}} -> extractAvailableConnectionsIndex() [react, actionplan]
{{KEY:AVAILABLE_CONNECTIONS_SUMMARY}} -> extractAvailableConnectionsSummary() [unused]
{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory() [actionplan]
{{KEY:AVAILABLE_METHODS}} -> extractAvailableMethods() [react, actionplan]
{{KEY:REVIEW_CONTENT}} -> extractReviewContent() [react, actionplan]
{{KEY:PREVIOUS_ACTION_RESULTS}} -> extractPreviousActionResults() [react]
{{KEY:LEARNINGS_AND_IMPROVEMENTS}} -> extractLearningsAndImprovements() [react]
{{KEY:LATEST_REFINEMENT_FEEDBACK}} -> extractLatestRefinementFeedback() [react]
Following placeholders are populated directly by prompt builders with according context in promptGenerationActionsReact module:
- ACTION_OBJECTIVE,
- SELECTED_ACTION,
- ACTION_SIGNATURE
"""
import json
@ -31,39 +34,31 @@ from typing import Dict, Any, List
from modules.datamodels.datamodelChat import ChatDocument
logger = logging.getLogger(__name__)
from modules.workflows.processing.shared.methodDiscovery import (
getAvailableDocuments,
getMethodsList,
methods,
discoverMethods
)
# ============================================================================
# CORE PLACEHOLDER EXTRACTION FUNCTIONS
# ============================================================================
from modules.workflows.processing.shared.methodDiscovery import (methods, discoverMethods)
def extractUserPrompt(context: Any) -> str:
    """Resolve the user's request text. Maps to {{KEY:USER_PROMPT}}.

    Prefers the cleaned prompt stored on the services object reachable via the
    context; otherwise falls back to the task step objective.
    """
    fallback = 'No request specified'
    # First choice: the normalized prompt kept on services, when reachable.
    try:
        svc = getattr(context, 'services', None)
        current = getattr(svc, 'currentUserPrompt', None) if svc else None
        if current:
            return current
    except Exception:
        # Any trouble reading services simply drops us to the fallback path.
        pass
    # Second choice: the objective recorded on the task step.
    step = getattr(context, 'task_step', None)
    if step:
        return step.objective or fallback
    return fallback
def extractAvailableDocuments(context: Any) -> str:
    """Pre-formatted document listing from the context. Maps to {{KEY:AVAILABLE_DOCUMENTS}}."""
    docs = getattr(context, 'available_documents', None)
    return docs if docs else "No documents available"
def extractWorkflowHistory(service: Any, context: Any) -> str:
    """Previous-round history text. Maps to {{KEY:WORKFLOW_HISTORY}}."""
    default = "No previous workflow rounds - this is the first round."
    wf = getattr(context, 'workflow', None)
    if not wf:
        return default
    return getPreviousRoundContext(service, wf) or default
def extractAvailableMethods(service: Any) -> str:
"""Extract available methods for action planning. Maps to {{KEY:AVAILABLE_METHODS}}"""
try:
@ -89,24 +84,10 @@ def extractAvailableMethods(service: Any) -> str:
logger.error(f"Error extracting available methods: {str(e)}")
return json.dumps({}, indent=2, ensure_ascii=False)
def extractUserLanguage(service: Any) -> str:
    """User's language code, defaulting to English. Maps to {{KEY:USER_LANGUAGE}}."""
    if service and service.user:
        return service.user.language
    return 'en'
def extractAvailableConnections(service: Any) -> str:
    """Bullet list of connection references. Maps to {{KEY:AVAILABLE_CONNECTIONS}}."""
    try:
        refs = getConnectionReferenceList(service)
        if refs:
            lines = [f"- {ref}" for ref in refs]
            return '\n'.join(lines)
        return "No connections available"
    except Exception as e:
        logger.error(f"Error extracting available connections: {str(e)}")
        return "No connections available"
def getConnectionReferenceList(services) -> List[str]:
"""Get list of available connections"""
try:
@ -128,7 +109,6 @@ def getConnectionReferenceList(services) -> List[str]:
logger.error(f"Error getting connection reference list: {str(e)}")
return []
def getPreviousRoundContext(services, context: Any) -> str:
"""Get previous round context for prompt"""
try:
@ -161,7 +141,6 @@ def getPreviousRoundContext(services, context: Any) -> str:
logger.error(f"Error getting previous round context: {str(e)}")
return "Error retrieving previous round context"
def extractReviewContent(context: Any) -> str:
"""Extract review content for result validation. Maps to {{KEY:REVIEW_CONTENT}}"""
try:
@ -234,18 +213,6 @@ def extractReviewContent(context: Any) -> str:
logger.error(f"Error extracting review content: {str(e)}")
return "No review content available"
# ============================================================================
# REACT MODE SPECIFIC PLACEHOLDERS
# ============================================================================
def extractActionObjective(context: Any, current_task: str, original_prompt: str, additional_data: Dict[str, Any] = None) -> str:
    """Action objective for React mode. Maps to {{KEY:ACTION_OBJECTIVE}}.

    Thin stub: the AI-backed implementation lives in placeholderFactoryReactOnly,
    so here we simply prefer the current task over the original prompt.
    """
    if current_task:
        return current_task
    return original_prompt
def extractPreviousActionResults(context: Any) -> str:
"""Extract previous action results for learning context. Maps to {{KEY:PREVIOUS_ACTION_RESULTS}}"""
try:
@ -265,7 +232,6 @@ def extractPreviousActionResults(context: Any) -> str:
logger.error(f"Error extracting previous action results: {str(e)}")
return "No previous actions executed yet"
def extractLearningsAndImprovements(context: Any) -> str:
"""Extract learnings and improvements from previous actions. Maps to {{KEY:LEARNINGS_AND_IMPROVEMENTS}}"""
try:
@ -294,7 +260,6 @@ def extractLearningsAndImprovements(context: Any) -> str:
logger.error(f"Error extracting learnings and improvements: {str(e)}")
return "No learnings available yet"
def extractLatestRefinementFeedback(context: Any) -> str:
"""Extract the latest refinement feedback. Maps to {{KEY:LATEST_REFINEMENT_FEEDBACK}}"""
try:
@ -326,308 +291,48 @@ def extractLatestRefinementFeedback(context: Any) -> str:
logger.error(f"Error extracting latest refinement feedback: {str(e)}")
return "No previous refinement feedback available"
def extractSelectedAction(additional_data: Dict[str, Any]) -> str:
    """Selected action label from additional data. Maps to {{KEY:SELECTED_ACTION}}."""
    if not additional_data:
        return ''
    return additional_data.get('SELECTED_ACTION', '')
def extractActionSignature(additional_data: Dict[str, Any]) -> str:
    """Action signature from additional data. Maps to {{KEY:ACTION_SIGNATURE}}."""
    if not additional_data:
        return ''
    return additional_data.get('ACTION_SIGNATURE', '')
# ============================================================================
# CONTEXT-AWARE PLACEHOLDER FUNCTIONS (for React mode)
# ============================================================================
def extractAvailableDocumentsSummary(service: Any, context: Any) -> str:
    """Summary of available documents (count only)."""
    no_docs = "No documents available"
    try:
        wf = getattr(context, 'workflow', None)
        if not wf:
            return no_docs
        listing = service.workflow.getAvailableDocuments(wf)
        if not listing or listing == no_docs:
            return no_docs
        # Documents are referenced as docList: or docItem: entries; count both.
        total = listing.count("docList:") + listing.count("docItem:")
        return f"{total} documents available from previous tasks"
    except Exception as e:
        logger.error(f"Error getting document summary: {str(e)}")
        return no_docs
def extractAvailableDocumentsIndex(service: Any, context: Any) -> str:
    """Index of available documents with detailed references for parameter generation.

    Args:
        service: Services container exposing `workflow.getAvailableDocuments`.
        context: Workflow context; must carry a truthy `workflow` attribute.
    Returns:
        The full formatted listing (docList/docItem references), or
        "No documents available".
    """
    # Fix: removed the stale `extractFullDocumentContext` def/docstring and the
    # duplicated logger line left behind by the rename (diff residue).
    try:
        if hasattr(context, 'workflow') and context.workflow:
            return service.workflow.getAvailableDocuments(context.workflow)
        return "No documents available"
    except Exception as e:
        logger.error(f"Error getting document index: {str(e)}")
        return "No documents available"
def extractAvailableConnectionsSummary(service: Any) -> str:
    """Summary of available connections (count only).

    Args:
        service: Services container passed through to `getConnectionReferenceList`.
    Returns:
        "N connections available" or "No connections available".
    """
    # Fix: removed the stale `extractMinimalConnectionContext` def/docstring and
    # the duplicated logger line left behind by the rename (diff residue).
    try:
        connections = getConnectionReferenceList(service)
        if connections:
            return f"{len(connections)} connections available"
        return "No connections available"
    except Exception as e:
        logger.error(f"Error getting connection summary: {str(e)}")
        return "No connections available"
def extractAvailableConnectionsIndex(service: Any) -> str:
    """Index of available connections with detailed references for parameter generation.

    Args:
        service: Services container passed through to `getConnectionReferenceList`.
    Returns:
        One "- <connection>" bullet per connection, or "No connections available".
    """
    # Fix: removed the stale `extractFullConnectionContext` def/docstring and
    # the duplicated logger line left behind by the rename (diff residue).
    try:
        connections = getConnectionReferenceList(service)
        if connections:
            return '\n'.join(f"- {conn}" for conn in connections)
        return "No connections available"
    except Exception as e:
        logger.error(f"Error getting connection index: {str(e)}")
        return "No connections available"
def extractUserPromptFromService(service: Any) -> str:
    """Return the current user prompt stored on the service.

    Falls back to 'No request specified' when the service is missing or holds
    no prompt for the current round.
    """
    prompt = getattr(service, 'currentUserPrompt', '') if service else ''
    return prompt if prompt else 'No request specified'
def extractUserLanguageFromService(service: Any) -> str:
    """Return the language code of the service's user, defaulting to 'en'."""
    if not service or not service.user:
        return 'en'
    return service.user.language
# ============================================================================
# ADDITIONAL PLACEHOLDER EXTRACTION FUNCTIONS (moved from methodDiscovery.py)
# ============================================================================
def extractAvailableDocumentsFromList(context: Any) -> str:
    """Extract available documents from context list. Maps to {{KEY:AVAILABLE_DOCUMENTS}}.

    Formats `context.available_documents` (ChatDocument instances, dicts, or
    arbitrary objects) as a numbered markdown list with mime type and size
    where present. Alternative implementation.
    """
    try:
        documents = getattr(context, 'available_documents', None) if context else None
        if not documents or not isinstance(documents, list):
            return "No documents available"
        entries = []
        for index, doc in enumerate(documents, 1):
            if isinstance(doc, ChatDocument):
                line = f"{index}. **{doc.fileName}**"
                if getattr(doc, 'mimeType', None):
                    line += f" ({doc.mimeType})"
                if getattr(doc, 'size', None):
                    line += f" - {doc.size} bytes"
            elif isinstance(doc, dict):
                line = f"{index}. **{doc.get('fileName', 'Unknown')}**"
                if doc.get('mimeType'):
                    line += f" ({doc['mimeType']})"
                if doc.get('size'):
                    line += f" - {doc['size']} bytes"
            else:
                # Unknown document shape: fall back to its string form.
                line = f"{index}. {str(doc)}"
            entries.append(line)
        return "\n".join(entries) if entries else "No documents available"
    except Exception as e:
        logger.error(f"Error getting available documents: {str(e)}")
        return "Error retrieving documents"
def extractWorkflowHistoryFromMessages(services: Any, context: Any) -> str:
    """Extract workflow history from messages. Maps to {{KEY:WORKFLOW_HISTORY}}.

    Loads the workflow's messages from the chat DB interface and renders the
    last 10 as "**User**/**Assistant**: <text>" lines, truncating each message
    to 200 characters. Alternative implementation.
    """
    fallback = "No workflow history available"
    try:
        workflow_id = getattr(context, 'workflow_id', None) if context else None
        if not workflow_id:
            return fallback
        all_messages = services.interfaceDbChat.getWorkflowMessages(workflow_id)
        if not all_messages:
            return fallback
        lines = []
        # Only the most recent messages are relevant for prompt context.
        for msg in all_messages[-10:]:
            if not (hasattr(msg, 'role') and hasattr(msg, 'message')):
                continue
            speaker = "User" if msg.role == "user" else "Assistant"
            text = msg.message
            if len(text) > 200:
                text = text[:200] + "..."
            lines.append(f"**{speaker}**: {text}")
        return "\n".join(lines) if lines else fallback
    except Exception as e:
        logger.error(f"Error getting workflow history: {str(e)}")
        return "Error retrieving workflow history"
def extractAvailableMethodsFromList(services: Any) -> str:
    """Extract available methods as formatted list. Maps to {{KEY:AVAILABLE_METHODS}} (alternative implementation)"""
    try:
        # Lazily populate the module-level `methods` registry on first use.
        # NOTE(review): assumes discoverMethods() mutates that shared registry
        # in place rather than returning it — confirm in methodDiscovery.
        if not methods:
            discoverMethods(services)
        return getMethodsList(services)
    except Exception as e:
        logger.error(f"Error getting available methods: {str(e)}")
        return "Error retrieving available methods"
def extractUserLanguageFromServices(services: Any) -> str:
    """Extract user language from services. Maps to {{KEY:USER_LANGUAGE}}.

    Returns 'en' whenever services, the user, or the language attribute is
    missing or empty. Alternative implementation.
    """
    try:
        user = getattr(services, 'user', None)
        language = getattr(user, 'language', None)
        return language or 'en'
    except Exception as e:
        logger.error(f"Error getting user language: {str(e)}")
        return 'en'
def extractReviewContentFromObservation(context: Any) -> str:
    """Extract review content from observation. Maps to {{KEY:REVIEW_CONTENT}}.

    Renders `context.observation` (a dict) as a readable block: success flag,
    generated-document count, per-preview lines, and notes. Alternative
    implementation.
    """
    try:
        observation = getattr(context, 'observation', None) if context else None
        if not isinstance(observation, dict):
            return "No review content available"
        sections = []
        if 'success' in observation:
            sections.append(f"Success: {observation['success']}")
        if 'documentsCount' in observation:
            sections.append(f"Documents generated: {observation['documentsCount']}")
        previews = observation.get('previews')
        if previews:
            sections.append("Document previews:")
            for entry in previews:
                # Non-dict previews are silently skipped.
                if isinstance(entry, dict):
                    name = entry.get('name', 'Unknown')
                    mimeType = entry.get('mimeType', 'Unknown')
                    size = entry.get('contentSize', 'Unknown size')
                    sections.append(f" - {name} ({mimeType}) - {size}")
        notes = observation.get('notes')
        if notes:
            sections.append("Notes:")
            for note in notes:
                sections.append(f" - {note}")
        return "\n".join(sections) if sections else "No review content available"
    except Exception as e:
        logger.error(f"Error getting review content: {str(e)}")
        return "Error retrieving review content"
def extractEnhancedDocumentContext(services: Any) -> str:
    """Extract enhanced document context with full metadata. Maps to {{KEY:ENHANCED_DOCUMENTS}}

    Groups every document attached to the current workflow's messages by
    round/task/action/label and formats each group with its docList reference
    plus per-document metadata (mime type, size, creation time).

    Args:
        services: Services container; the workflow is read from
            `services.currentWorkflow` and must expose `id` and `messages`.
    Returns:
        Formatted group listing, or a "No workflow context available" /
        "No documents available" fallback string.
    """
    try:
        workflow = getattr(services, 'currentWorkflow', None)
        if not workflow or not hasattr(workflow, 'id'):
            return "No workflow context available"
        if not hasattr(workflow, 'messages') or not workflow.messages:
            return "No documents available"
        # Single pass: group documents by round/task/action/label and remember
        # the id of the first message contributing to each group (used for the
        # docList reference). This replaces the original second O(n^2) pass
        # that re-scanned all messages per group; output is unchanged.
        docGroups = {}
        groupMessageIds = {}
        for message in workflow.messages:
            if not (hasattr(message, 'documents') and message.documents):
                continue
            round_num = getattr(message, 'roundNumber', 0)
            task_num = getattr(message, 'taskNumber', 0)
            action_num = getattr(message, 'actionNumber', 0)
            label = getattr(message, 'documentsLabel', 'results')
            group_key = f"round{round_num}_task{task_num}_action{action_num}_{label}"
            if group_key not in docGroups:
                docGroups[group_key] = []
                groupMessageIds[group_key] = str(message.id)
            docGroups[group_key].extend(message.documents)
        if not docGroups:
            return "No documents available"
        # Format documents by groups with proper docList references
        docList = []
        for group_key, group_docs in docGroups.items():
            message_id = groupMessageIds.get(group_key)
            # Generate proper docList reference (fallback to the bare label key).
            docListRef = f"docList:{message_id}:{group_key}" if message_id else group_key
            docList.append(f"\n**{group_key}:**")
            docList.append(f"Reference: {docListRef}")
            for i, doc in enumerate(group_docs, 1):
                if isinstance(doc, ChatDocument):
                    docInfo = f" {i}. **{doc.fileName}**"
                    if hasattr(doc, 'mimeType') and doc.mimeType:
                        docInfo += f" ({doc.mimeType})"
                    if hasattr(doc, 'size') and doc.size:
                        docInfo += f" - {doc.size} bytes"
                    if hasattr(doc, 'created') and doc.created:
                        docInfo += f" - Created: {doc.created}"
                    docList.append(docInfo)
                elif isinstance(doc, dict):
                    docInfo = f" {i}. **{doc.get('fileName', 'Unknown')}**"
                    if doc.get('mimeType'):
                        docInfo += f" ({doc['mimeType']})"
                    if doc.get('size'):
                        docInfo += f" - {doc['size']} bytes"
                    if doc.get('created'):
                        docInfo += f" - Created: {doc['created']}"
                    docList.append(docInfo)
                else:
                    # Unknown document shape: fall back to its string form.
                    docList.append(f" {i}. {str(doc)}")
        return "\n".join(docList) if docList else "No documents available"
    except Exception as e:
        logger.error(f"Error getting enhanced document context: {str(e)}")
        return "Error retrieving document context"

View file

@ -1,189 +0,0 @@
"""
Context-aware placeholder service for different workflow phases.
This module provides different levels of context based on the workflow phase.
"""
import json
import logging
from typing import Dict, Any, Optional
from enum import Enum
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPromptFromService, extractFullDocumentContext,
extractWorkflowHistory, extractUserLanguageFromService,
extractMinimalDocumentContext, extractAvailableMethods,
extractMinimalConnectionContext, extractFullConnectionContext,
extractReviewContent, extractPreviousActionResults,
extractLearningsAndImprovements, extractLatestRefinementFeedback
)
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, Priority, ProcessingMode
logger = logging.getLogger(__name__)
class WorkflowPhase(Enum):
    """Different phases of workflow execution requiring different context levels.

    The string values are stable identifiers; ContextAwarePlaceholders
    dispatches on these members to decide how much context to assemble.
    """
    TASK_PLANNING = "task_planning"  # Needs full context for planning
    REACT_PLAN_SELECTION = "react_plan_selection"  # Needs minimal context for action selection
    REACT_PARAMETERS = "react_parameters"  # Needs full context for parameter generation
    ACTION_PLANNING = "action_planning"  # Needs full context for action planning
    RESULT_REVIEW = "result_review"  # Needs full context for review
class ContextAwarePlaceholders:
    """Context-aware placeholder service that provides different context levels based on workflow phase."""

    def __init__(self, services):
        # Shared Services container (user, workflow, ai, ...) consumed by the
        # extract* helper functions.
        self.services = services

    async def getPlaceholders(self, phase: WorkflowPhase, context: Any, additional_data: Optional[Dict[str, Any]] = None) -> Dict[str, str]:
        """
        Get placeholders based on workflow phase and context.
        Args:
            phase: The workflow phase determining context level
            context: The workflow context object
            additional_data: Additional data for specific phases (e.g., selected action)
        Returns:
            Dictionary of placeholder key-value pairs
        """
        if phase == WorkflowPhase.TASK_PLANNING:
            # Planning needs the full document and history picture.
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "WORKFLOW_HISTORY": extractWorkflowHistory(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }
        elif phase == WorkflowPhase.REACT_PLAN_SELECTION:
            # Action selection only needs compact counts plus the method catalogue.
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractMinimalDocumentContext(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
                "AVAILABLE_METHODS": extractAvailableMethods(self.services),
                "AVAILABLE_CONNECTIONS": extractMinimalConnectionContext(self.services),
            }
        elif phase == WorkflowPhase.REACT_PARAMETERS:
            # Get both original user prompt and current task objective
            original_prompt = extractUserPromptFromService(self.services)
            current_task = ""
            if hasattr(context, 'task_step') and context.task_step and context.task_step.objective:
                current_task = context.task_step.objective
            # Combine original prompt and current task for better context
            combined_prompt = f"Original request: {original_prompt}"
            if current_task and current_task != original_prompt:
                combined_prompt += f"\n\nCurrent task: {current_task}"
            # Generate intelligent action objective
            action_objective = await self._generateActionObjective(context, current_task, original_prompt, additional_data)
            placeholders = {
                "USER_PROMPT": combined_prompt,
                "ACTION_OBJECTIVE": action_objective,  # AI-generated intelligent objective
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
                "AVAILABLE_CONNECTIONS": extractFullConnectionContext(self.services),
                "PREVIOUS_ACTION_RESULTS": extractPreviousActionResults(context),
                "LEARNINGS_AND_IMPROVEMENTS": extractLearningsAndImprovements(context),
                "LATEST_REFINEMENT_FEEDBACK": extractLatestRefinementFeedback(context),
            }
            # Add additional data if provided (e.g., selected action, action signature)
            if additional_data:
                placeholders.update(additional_data)
            return placeholders
        elif phase == WorkflowPhase.ACTION_PLANNING:
            # Action planning mirrors task planning plus methods/connections.
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "WORKFLOW_HISTORY": extractWorkflowHistory(self.services, context),
                "AVAILABLE_METHODS": extractAvailableMethods(self.services),
                "AVAILABLE_CONNECTIONS": extractFullConnectionContext(self.services),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }
        elif phase == WorkflowPhase.RESULT_REVIEW:
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "REVIEW_CONTENT": extractReviewContent(context),
            }
        else:
            # Unknown phase: log and fall back to the minimal, safe pair.
            logger.warning(f"Unknown workflow phase: {phase}")
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }

    async def _generateActionObjective(self, context: Any, current_task: str, original_prompt: str, additional_data: Optional[Dict[str, Any]] = None) -> str:
        """Generate intelligent, context-aware action objective using AI.

        Falls back to `current_task` (or `original_prompt`) whenever the AI
        service is unavailable, returns an empty response, or raises.
        """
        try:
            # Get the selected action from additional_data
            selected_action = additional_data.get('SELECTED_ACTION', '') if additional_data else ''
            # Build context for AI objective generation
            context_info = {
                "original_prompt": original_prompt,
                "current_task": current_task,
                "selected_action": selected_action,
                "available_documents": extractFullDocumentContext(self.services, context),
                "available_connections": extractFullConnectionContext(self.services),
                "previous_results": extractPreviousActionResults(context),
                "learnings": extractLearningsAndImprovements(context),
                "refinement_feedback": extractLatestRefinementFeedback(context),
                "user_language": extractUserLanguageFromService(self.services)
            }
            # Create AI prompt for objective generation
            objective_prompt = f"""Generate a specific, actionable objective for the selected action.
CONTEXT:
- Original User Request: {context_info['original_prompt']}
- Current Task: {context_info['current_task']}
- Selected Action: {context_info['selected_action']}
- Available Documents: {context_info['available_documents']}
- Available Connections: {context_info['available_connections']}
- Previous Action Results: {context_info['previous_results']}
- Learnings and Improvements: {context_info['learnings']}
- Latest Refinement Feedback: {context_info['refinement_feedback']}
- User Language: {context_info['user_language']}
REQUIREMENTS:
1. Create a SPECIFIC objective that tells the action exactly what to accomplish
2. Include relevant details about documents, connections, recipients, etc.
3. Learn from previous attempts and refinement feedback
4. Make it actionable and concrete
5. Focus on the user's actual intent, not just the task description
6. If this is a retry, incorporate learnings from previous failures
RESPONSE FORMAT:
Return ONLY the objective text, no explanations or formatting.
OBJECTIVE:"""
            # Call AI to generate the objective
            if self.services and hasattr(self.services, 'ai'):
                # Constrain cost/latency for this auxiliary call.
                options = AiCallOptions(
                    operationType=OperationType.ANALYSE_CONTENT,
                    priority=Priority.BALANCED,
                    compressPrompt=False,
                    compressContext=False,
                    processingMode=ProcessingMode.ADVANCED,
                    maxCost=0.01,
                    maxProcessingTime=10
                )
                # NOTE(review): assumes callAi returns the raw completion text
                # as a string — confirm against the AI service interface.
                response = await self.services.ai.callAi(
                    prompt=objective_prompt,
                    placeholders={},
                    options=options
                )
                # Extract objective from response
                if response and response.strip():
                    return response.strip()
            # Fallback to current task if AI fails
            return current_task or original_prompt
        except Exception as e:
            logger.error(f"Error generating action objective: {str(e)}")
            # Fallback to current task
            return current_task or original_prompt

View file

@ -5,13 +5,32 @@ Handles prompt templates and extraction functions for actionplan mode action han
import json
import logging
from typing import Dict, Any
from typing import Dict, Any, List
from modules.datamodels.datamodelWorkflow import PromptBundle, PromptPlaceholder
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractAvailableDocumentsSummary,
extractWorkflowHistory,
extractAvailableMethods,
extractUserLanguage,
extractAvailableConnectionsIndex,
extractReviewContent,
)
logger = logging.getLogger(__name__)
def createActionDefinitionPromptTemplate() -> str:
"""Create action definition prompt template with placeholders."""
return """# Action Definition
def generateActionDefinitionPrompt(services, context: Any) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
]
template = """# Action Definition
Generate the next action to advance toward completing the task objective.
@ -21,20 +40,20 @@ def createActionDefinitionPromptTemplate() -> str:
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
### Available Connections
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
### User Language
{{KEY:USER_LANGUAGE}}
### Workflow History
{{KEY:WORKFLOW_HISTORY}}
### Available Methods
{{KEY:AVAILABLE_METHODS}}
### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}
### User Language
{{KEY:USER_LANGUAGE}}
## ⚠️ RULES
### Action Names
@ -43,8 +62,8 @@ def createActionDefinitionPromptTemplate() -> str:
- **DO NOT separate** method and action names - use the full compound name
### Parameter Guidelines
- **Use exact document references** from AVAILABLE_DOCUMENTS
- **Use exact connection references** from AVAILABLE_CONNECTIONS
- **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults
@ -106,9 +125,16 @@ def createActionDefinitionPromptTemplate() -> str:
## 🚀 Response Format
Return ONLY the JSON object."""
def createResultReviewPromptTemplate() -> str:
"""Create result review prompt template with placeholders."""
return """# Result Review & Validation
return PromptBundle(prompt=template, placeholders=placeholders)
def generateResultReviewPrompt(context: Any) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="REVIEW_CONTENT", content=extractReviewContent(context), summaryAllowed=True),
]
template = """# Result Review & Validation
Review task execution outcomes and determine success, retry needs, or failure.
@ -198,7 +224,7 @@ def createResultReviewPromptTemplate() -> str:
- **Technical fixes** - Address specific technical issues
### Examples
- "Use more specific document references from AVAILABLE_DOCUMENTS"
- "Use more specific document references from AVAILABLE_DOCUMENTS_INDEX"
- "Include user language parameter for better localization"
- "Break down complex objective into smaller, focused actions"
- "Verify document references before processing"
@ -206,3 +232,5 @@ def createResultReviewPromptTemplate() -> str:
## 🚀 Response Format
Return ONLY the JSON object. Do not include any explanatory text."""
return PromptBundle(prompt=template, placeholders=placeholders)

View file

@ -3,15 +3,36 @@ React Mode Prompt Generation
Handles prompt templates for react mode action handling.
"""
def createReactPlanSelectionPromptTemplate() -> str:
"""Create action selection prompt template for React mode with minimal placeholders."""
return """Select one action to advance the task.
from typing import Any, List
from modules.datamodels.datamodelWorkflow import PromptBundle, PromptPlaceholder
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractUserLanguage,
extractAvailableMethods,
extractAvailableDocumentsSummary,
extractAvailableDocumentsIndex,
extractAvailableConnectionsIndex,
extractPreviousActionResults,
extractLearningsAndImprovements,
extractLatestRefinementFeedback,
)
from modules.workflows.processing.shared.methodDiscovery import methods, getActionParameterSignature
def generateReactPlanSelectionPrompt(services, context: Any) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
]
template = """Select one action to advance the task.
OBJECTIVE:
{{KEY:USER_PROMPT}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_DOCUMENTS_SUMMARY:
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
AVAILABLE_METHODS:
{{KEY:AVAILABLE_METHODS}}
@ -28,9 +49,37 @@ def createReactPlanSelectionPromptTemplate() -> str:
4. Do NOT add explanations
"""
def createReactParametersPromptTemplate() -> str:
"""Create comprehensive action parameter prompt template for React mode with all available context."""
return """Generate parameters for this action.
return PromptBundle(prompt=template, placeholders=placeholders)
def generateReactParametersPrompt(services, context: Any, compoundActionName: str) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
# derive method/action and signature
methodName, actionName = (compoundActionName.split('.', 1) if '.' in compoundActionName else (compoundActionName, ''))
actionSignature = getActionParameterSignature(methodName, actionName, methods)
# determine action objective if available, else fall back to user prompt
actionObjective = None
if hasattr(context, 'action_objective') and context.action_objective:
actionObjective = context.action_objective
elif hasattr(context, 'task_step') and context.task_step and getattr(context.task_step, 'objective', None):
actionObjective = context.task_step.objective
else:
actionObjective = extractUserPrompt(context)
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="ACTION_OBJECTIVE", content=actionObjective, summaryAllowed=False),
PromptPlaceholder(label="ACTION_SIGNATURE", content=actionSignature, summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_INDEX", content=extractAvailableDocumentsIndex(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
PromptPlaceholder(label="PREVIOUS_ACTION_RESULTS", content=extractPreviousActionResults(context), summaryAllowed=True),
PromptPlaceholder(label="LEARNINGS_AND_IMPROVEMENTS", content=extractLearningsAndImprovements(context), summaryAllowed=True),
PromptPlaceholder(label="LATEST_REFINEMENT_FEEDBACK", content=extractLatestRefinementFeedback(context), summaryAllowed=True),
PromptPlaceholder(label="SELECTED_ACTION", content=compoundActionName, summaryAllowed=False),
]
template = """Generate parameters for this action.
ACTION_OBJECTIVE (the objective for this action to fulfill):
{{KEY:ACTION_OBJECTIVE}}
@ -38,11 +87,11 @@ def createReactParametersPromptTemplate() -> str:
ACTION_SIGNATURE (the signature of the action to generate parameters for):
{{KEY:ACTION_SIGNATURE}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS:
{{KEY:AVAILABLE_CONNECTIONS}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
USER_REQUEST (final user prompt to deliver):
{{KEY:USER_PROMPT}}
@ -72,8 +121,8 @@ def createReactParametersPromptTemplate() -> str:
RULES:
1. Use ONLY parameter names from ACTION_SIGNATURE
2. Use exact connection references from AVAILABLE_CONNECTIONS for connectionReference parameters
3. Use exact document references from AVAILABLE_DOCUMENTS for documentList parameters
2. Use exact connection references from AVAILABLE_CONNECTIONS_INDEX for connectionReference parameters
3. Use exact document references from AVAILABLE_DOCUMENTS_INDEX for documentList parameters
4. Learn from PREVIOUS_ACTION_RESULTS and LEARNINGS_AND_IMPROVEMENTS to avoid repeating mistakes
5. Consider LATEST_REFINEMENT_FEEDBACK when generating parameters
6. Use the ACTION_OBJECTIVE to understand the specific goal for this action
@ -83,9 +132,16 @@ def createReactParametersPromptTemplate() -> str:
10. Do NOT add explanations
"""
def createReactRefinementPromptTemplate() -> str:
"""Create refinement prompt template for React mode with full context placeholders."""
return """Decide the next step based on the observation.
return PromptBundle(prompt=template, placeholders=placeholders)
def generateReactRefinementPrompt(services, context: Any, reviewContent: str) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="REVIEW_CONTENT", content=reviewContent, summaryAllowed=True),
]
template = """Decide the next step based on the observation.
OBJECTIVE:
{{KEY:USER_PROMPT}}
@ -106,3 +162,5 @@ def createReactRefinementPromptTemplate() -> str:
4. Do NOT use markdown code blocks
5. Do NOT add explanations
"""
return PromptBundle(prompt=template, placeholders=placeholders)

View file

@ -5,14 +5,26 @@ Handles prompt templates and extraction functions for task planning phase.
import json
import logging
from typing import Dict, Any
from typing import Dict, Any, List
from modules.datamodels.datamodelWorkflow import PromptBundle, PromptPlaceholder
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractAvailableDocumentsSummary,
extractWorkflowHistory,
)
logger = logging.getLogger(__name__)
def createTaskPlanningPromptTemplate() -> str:
"""Create task planning prompt template with placeholders."""
return """# Task Planning
def generateTaskPlanningPrompt(services, context: Any) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services, context), summaryAllowed=True),
]
template = """# Task Planning
Break down user requests into logical, executable task steps.
@ -22,7 +34,7 @@ def createTaskPlanningPromptTemplate() -> str:
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
### Previous Workflow Rounds
{{KEY:WORKFLOW_HISTORY}}
@ -105,3 +117,5 @@ def createTaskPlanningPromptTemplate() -> str:
## 🚀 Response Format
Return ONLY the JSON object."""
return PromptBundle(prompt=template, placeholders=placeholders)

View file

@ -155,6 +155,7 @@ class WorkflowManager:
"""Process a workflow with user input"""
try:
# Store the current user prompt in services for easy access throughout the workflow
self.services.rawUserPrompt = userInput.prompt
self.services.currentUserPrompt = userInput.prompt
self.workflowProcessor = WorkflowProcessor(self.services, workflow)
message = await self._sendFirstMessage(userInput, workflow)
@ -176,11 +177,11 @@ class WorkflowManager:
self.workflowProcessor._checkWorkflowStopped(workflow)
# Create initial message using interface
# Generate the correct documentsLabel that matches what getDocumentReferenceString will create
# For first user message, include round info in the user context label
round_num = workflow.currentRound
task_num = 0
action_num = 0
context_label = f"round{round_num}_task{task_num}_action{action_num}_context"
context_label = f"round{round_num}_usercontext"
messageData = {
"workflowId": workflow.id,
@ -215,6 +216,128 @@ class WorkflowManager:
message.documents = documents
# Update the message with documents in database
self.services.workflow.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})
# Analyze the user's input to extract intent and offload bulky context into documents
# Best-effort post-processing: every failure path below degrades to using the
# raw user prompt unchanged, so the workflow round never breaks on analysis errors.
try:
    # Instruction prompt for the analyzer model (strict JSON-only contract).
    # NOTE(review): the "10% of the model's max tokens" rule is delegated to
    # the model, which is never told the actual token limit — presumably it
    # estimates; confirm this is intended.
    analyzerPrompt = (
        "You are an input analyzer. Split the user's message into:\n"
        "1) intent: the user's core request in one concise paragraph, normalized to the user's language.\n"
        "2) contextItems: supportive data to attach as separate documents if significantly larger than the intent. "
        "Include large literal data blocks, long lists/tables, code/JSON blocks, quoted transcripts, CSV fragments, or detailed specs. "
        "Keep URLs in the intent unless they include large pasted content.\n\n"
        "Rules:\n"
        "- If total content length (intent + data) is less than 10% of the model's max tokens, do not extract; "
        "return an empty contextItems and keep a compact, self-contained intent.\n"
        "- If content exceeds that, move bulky parts into contextItems, keeping the intent short and clear.\n"
        "- Preserve critical references (URLs, filenames) in the intent.\n"
        "- Normalize the intent to the detected language. If mixed-language, use the primary detected language and normalize.\n\n"
        "Output JSON only (no markdown):\n"
        "{\n"
        " \"detectedLanguage\": \"en\",\n"
        " \"intent\": \"Concise normalized request...\",\n"
        " \"contextItems\": [\n"
        " {\n"
        " \"title\": \"User context 1\",\n"
        " \"mimeType\": \"text/plain\",\n"
        " \"content\": \"Full extracted content block here\"\n"
        " }\n"
        " ]\n"
        "}\n\n"
        f"User message:\n{userInput.prompt}"
    )
    # Call AI analyzer
    aiResponse = await self.services.ai.callAi(prompt=analyzerPrompt)
    # Defaults: if parsing fails we fall back to the untouched user prompt.
    detectedLanguage = None
    intentText = userInput.prompt
    contextItems = []
    # Parse analyzer response (JSON expected). Tolerates surrounding prose by
    # slicing from the first '{' to the last '}' before json.loads.
    try:
        import json  # NOTE(review): function-local import; consider hoisting to module level
        jsonStart = aiResponse.find('{') if aiResponse else -1
        jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
        if jsonStart != -1 and jsonEnd > jsonStart:
            parsed = json.loads(aiResponse[jsonStart:jsonEnd])
            detectedLanguage = parsed.get('detectedLanguage') or None
            if parsed.get('intent'):
                intentText = parsed.get('intent')
            contextItems = parsed.get('contextItems') or []
    except Exception:
        # Malformed/absent JSON: keep the raw prompt as intent, attach nothing.
        contextItems = []
    # Update services state
    if detectedLanguage and isinstance(detectedLanguage, str):
        self._setUserLanguage(detectedLanguage)
    # The cleaned intent becomes this round's working prompt; the raw message
    # was stored separately in services.rawUserPrompt earlier in processWorkflow.
    self.services.currentUserPrompt = intentText or userInput.prompt
    # Telemetry (sizes and counts) — UTF-8 byte sizes; never allowed to fail the round.
    try:
        inputSize = len(userInput.prompt.encode('utf-8')) if userInput and userInput.prompt else 0
        outputSize = len(aiResponse.encode('utf-8')) if aiResponse else 0
        self.services.workflow.createLog({
            "workflowId": workflow.id,
            "message": f"User prompt analyzed (input {inputSize} bytes, output {outputSize} bytes, items {len(contextItems)})",
            "type": "info",
            "status": "running",
            "progress": 0
        })
    except Exception:
        # Deliberate swallow: telemetry is optional.
        pass
    # Create and attach documents for context items
    if contextItems and isinstance(contextItems, list):
        created_docs = []
        for idx, item in enumerate(contextItems):
            # Each item is handled independently; a malformed item is skipped
            # via the broad except below (intentional best-effort behavior).
            try:
                title = item.get('title') if isinstance(item, dict) else None
                mime = item.get('mimeType') if isinstance(item, dict) else None
                content = item.get('content') if isinstance(item, dict) else None
                if not content:
                    continue
                fileName = (title or f"user_context_{idx+1}.txt").strip()
                mimeType = (mime or "text/plain").strip()
                # Create file in component storage
                content_bytes = content.encode('utf-8')
                file_item = self.services.interfaceDbComponent.createFile(
                    name=fileName,
                    mimeType=mimeType,
                    content=content_bytes
                )
                # Persist file data
                self.services.interfaceDbComponent.createFileData(file_item.id, content_bytes)
                # Collect file info
                file_info = self.services.workflow.getFileInfo(file_item.id)
                # NOTE(review): local aliased import — ChatDocument is already
                # imported at this file's top level; presumably redundant.
                from modules.datamodels.datamodelChat import ChatDocument as _ChatDocument
                doc = _ChatDocument(
                    messageId=message.id,
                    fileId=file_item.id,
                    fileName=file_info.get("fileName", fileName) if file_info else fileName,
                    fileSize=file_info.get("size", len(content_bytes)) if file_info else len(content_bytes),
                    mimeType=file_info.get("mimeType", mimeType) if file_info else mimeType
                )
                # Persist document record
                self.services.interfaceDbChat.createDocument(doc.to_dict())
                created_docs.append(doc)
            except Exception:
                continue
        if created_docs:
            # Attach to message and persist
            if not message.documents:
                message.documents = []
            message.documents.extend(created_docs)
            # Ensure label is user_context for discoverability
            message.documentsLabel = context_label
            self.services.workflow.updateMessage(message.id, {
                "documents": [d.to_dict() for d in message.documents],
                "documentsLabel": context_label
            })
except Exception as e:
    # Analysis is advisory only — log the failure and continue with the raw prompt.
    logger.warning(f"Prompt analysis failed or skipped: {str(e)}")
return message
else: