refactored workflow engine

This commit is contained in:
ValueOn AG 2025-10-05 11:34:40 +02:00
parent 1a8cecc50a
commit 9b8510bfd0
31 changed files with 1559 additions and 1575 deletions

View file

@ -1053,7 +1053,7 @@ class MethodOutlook(MethodBase):
Parameters:
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
to (str): REQUIRED - Email recipient address
to (List[str]): REQUIRED - Email recipient addresses
subject (str): REQUIRED - Email subject line
body (str): REQUIRED - Email body content
cc (List[str], optional): CC recipients
@ -1072,7 +1072,9 @@ class MethodOutlook(MethodBase):
if not connectionReference or not to or not subject or not body:
return ActionResult.isFailure(error="connectionReference, to, subject, and body are required")
# Convert single values to lists
# Convert single values to lists for all recipient parameters
if isinstance(to, str):
to = [to]
if isinstance(cc, str):
cc = [cc]
if isinstance(bcc, str):
@ -1215,7 +1217,7 @@ class MethodOutlook(MethodBase):
Parameters:
connectionReference (str): REQUIRED - Reference to the Microsoft connection (must be a connection label from AVAILABLE_CONNECTIONS list)
to (str): REQUIRED - Email recipient address
to (List[str]): REQUIRED - Email recipient addresses
context (str): REQUIRED - Context information for email composition
documentList (List[str], optional): Document references to include as context and attachments
cc (List[str], optional): CC recipients
@ -1236,7 +1238,9 @@ class MethodOutlook(MethodBase):
if not connectionReference or not to or not context:
return ActionResult.isFailure(error="connectionReference, to, and context are required")
# Convert single values to lists
# Convert single values to lists for all recipient parameters
if isinstance(to, str):
to = [to]
if isinstance(cc, str):
cc = [cc]
if isinstance(bcc, str):

View file

@ -5,7 +5,7 @@ import logging
from typing import Dict, Any, List
from modules.datamodels.datamodelWorkflow import ActionResult, TaskAction, TaskStep
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.shared.promptFactory import methods
from modules.workflows.processing.shared.methodDiscovery import methods
logger = logging.getLogger(__name__)

View file

@ -6,11 +6,11 @@ import logging
from typing import Dict, Any
from modules.datamodels.datamodelWorkflow import TaskStep, TaskContext, TaskPlan
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
from modules.workflows.processing.shared.promptFactoryPlaceholders import (
createTaskPlanningPromptTemplate,
extractUserPrompt,
extractAvailableDocuments,
extractWorkflowHistory
from modules.workflows.processing.shared.promptGenerationTaskplan import (
createTaskPlanningPromptTemplate
)
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt
)
logger = logging.getLogger(__name__)

View file

@ -1,4 +1,4 @@
# actionplanMode.py
# modeActionplan.py
# Actionplan mode implementation for workflows
import json
@ -11,16 +11,19 @@ from modules.datamodels.datamodelWorkflow import (
)
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
from modules.workflows.processing.modes.baseMode import BaseMode
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.shared.executionState import TaskExecutionState
from modules.workflows.processing.shared.promptFactoryPlaceholders import (
from modules.workflows.processing.shared.promptGenerationActionsActionplan import (
createActionDefinitionPromptTemplate,
createResultReviewPromptTemplate,
createResultReviewPromptTemplate
)
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractAvailableDocuments,
extractWorkflowHistory,
extractAvailableMethods,
extractUserLanguage,
extractAvailableConnections,
extractReviewContent
)
@ -135,8 +138,7 @@ class ActionplanMode(BaseMode):
availableMethods = extractAvailableMethods(self.services)
userLanguage = extractUserLanguage(self.services)
# Action planner also needs connections for parameter generation (like old system)
availableConnections = self.services.workflow.getConnectionReferenceList()
availableConnectionsStr = '\n'.join(f"- {conn}" for conn in availableConnections) if availableConnections else "No connections available"
availableConnectionsStr = extractAvailableConnections(self.services)
# Create placeholders dictionary
placeholders = {

View file

@ -1,4 +1,4 @@
# baseMode.py
# modeBase.py
# Abstract base class for workflow modes
from abc import ABC, abstractmethod

View file

@ -1,4 +1,4 @@
# reactMode.py
# modeReact.py
# React mode implementation for workflows
import json
@ -13,13 +13,13 @@ from modules.datamodels.datamodelWorkflow import (
)
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
from modules.workflows.processing.modes.baseMode import BaseMode
from modules.workflows.processing.shared.executionState import TaskExecutionState, should_continue
from modules.workflows.processing.shared.contextAwarePlaceholders import (
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.shared.executionState import TaskExecutionState, shouldContinue
from modules.workflows.processing.shared.placeholderFactoryReactOnly import (
ContextAwarePlaceholders,
WorkflowPhase
)
from modules.workflows.processing.shared.reactPromptTemplates import (
from modules.workflows.processing.shared.promptGenerationActionsReact import (
createReactPlanSelectionPromptTemplate,
createReactParametersPromptTemplate,
createReactRefinementPromptTemplate
@ -150,8 +150,8 @@ class ReactMode(BaseMode):
progressState = self.progressTracker.getCurrentProgress()
shouldContinue = self.progressTracker.shouldContinue(progressState, observation.get('contentValidation', {}))
if not shouldContinue or not should_continue(observation, lastReviewDict, step, state.max_steps):
logger.info(f"Stopping at step {step}: shouldContinue={shouldContinue}, should_continue={should_continue(observation, lastReviewDict, step, state.max_steps)}")
if not shouldContinue or not shouldContinue(observation, lastReviewDict, step, state.max_steps):
logger.info(f"Stopping at step {step}: shouldContinue={shouldContinue}, shouldContinue={shouldContinue(observation, lastReviewDict, step, state.max_steps)}")
break
step += 1
@ -233,25 +233,8 @@ class ReactMode(BaseMode):
promptTemplate = createReactParametersPromptTemplate()
# Get action parameter description (not function signature)
actionParameters = ""
from modules.workflows.processing.shared.promptFactory import methods
if self.services and methodName in methods:
methodInstance = methods[methodName]['instance']
if actionName in methodInstance.actions:
action_info = methodInstance.actions[actionName]
# Extract parameter descriptions from docstring
docstring = action_info.get('description', '')
paramDescriptions, paramTypes = methodInstance._extractParameterDetails(docstring)
param_list = []
for paramName, paramDesc in paramDescriptions.items():
paramType = paramTypes.get(paramName, 'Any')
if paramDesc:
param_list.append(f"- {paramName} ({paramType}): {paramDesc}")
else:
param_list.append(f"- {paramName} ({paramType})")
actionParameters = "Required parameters:\n" + "\n".join(param_list)
from modules.workflows.processing.shared.methodDiscovery import methods, getActionParameterSignature
actionParameters = getActionParameterSignature(methodName, actionName, methods)
selectedAction = compoundActionName

View file

@ -1,397 +0,0 @@
"""
Context-aware placeholder service for different workflow phases.
This module provides different levels of context based on the workflow phase.
"""
import json
import logging
from typing import Dict, Any, Optional
from enum import Enum
logger = logging.getLogger(__name__)
class WorkflowPhase(Enum):
    """Different phases of workflow execution requiring different context levels."""

    # Planning and review phases receive full context; React plan selection
    # deliberately runs with a minimal context to keep the prompt small.
    TASK_PLANNING = "task_planning"
    REACT_PLAN_SELECTION = "react_plan_selection"
    REACT_PARAMETERS = "react_parameters"
    ACTION_PLANNING = "action_planning"
    RESULT_REVIEW = "result_review"
class ContextAwarePlaceholders:
    """Context-aware placeholder service that provides different context levels based on workflow phase."""
    def __init__(self, services):
        # Service center handle; provides workflow, user and AI sub-services used below.
        self.services = services
    async def getPlaceholders(self, phase: WorkflowPhase, context: Any, additional_data: Dict[str, Any] = None) -> Dict[str, str]:
        """
        Get placeholders based on workflow phase and context.
        Args:
            phase: The workflow phase determining context level
            context: The workflow context object
            additional_data: Additional data for specific phases (e.g., selected action)
        Returns:
            Dictionary of placeholder key-value pairs
        """
        # Dispatch to the phase-specific builder; only REACT_PARAMETERS is async
        # because it may call the AI service to generate an action objective.
        if phase == WorkflowPhase.TASK_PLANNING:
            return self._getTaskPlanningPlaceholders(context)
        elif phase == WorkflowPhase.REACT_PLAN_SELECTION:
            return self._getReactPlanSelectionPlaceholders(context)
        elif phase == WorkflowPhase.REACT_PARAMETERS:
            return await self._getReactParametersPlaceholders(context, additional_data)
        elif phase == WorkflowPhase.ACTION_PLANNING:
            return self._getActionPlanningPlaceholders(context)
        elif phase == WorkflowPhase.RESULT_REVIEW:
            return self._getResultReviewPlaceholders(context)
        else:
            logger.warning(f"Unknown workflow phase: {phase}")
            return self._getMinimalPlaceholders(context)
    def _getTaskPlanningPlaceholders(self, context: Any) -> Dict[str, str]:
        """Get full context placeholders for task planning."""
        return {
            "USER_PROMPT": self._extractUserPrompt(context),
            "AVAILABLE_DOCUMENTS": self._getFullDocumentContext(context),
            "WORKFLOW_HISTORY": self._getWorkflowHistory(context),
            "USER_LANGUAGE": self._extractUserLanguage(),
        }
    def _getReactPlanSelectionPlaceholders(self, context: Any) -> Dict[str, str]:
        """Get minimal context placeholders for React plan selection."""
        # Counts-only document/connection context keeps the selection prompt short.
        return {
            "USER_PROMPT": self._extractUserPrompt(context),
            "AVAILABLE_DOCUMENTS": self._getMinimalDocumentContext(context),
            "USER_LANGUAGE": self._extractUserLanguage(),
            "AVAILABLE_METHODS": self._getAvailableMethods(),
            "AVAILABLE_CONNECTIONS": self._getMinimalConnectionContext(),
        }
    async def _getReactParametersPlaceholders(self, context: Any, additional_data: Dict[str, Any] = None) -> Dict[str, str]:
        """Get full context placeholders for React parameter generation."""
        # Get both original user prompt and current task objective
        original_prompt = self._extractUserPrompt(context)
        current_task = ""
        if hasattr(context, 'task_step') and context.task_step and context.task_step.objective:
            current_task = context.task_step.objective
        # Combine original prompt and current task for better context
        combined_prompt = f"Original request: {original_prompt}"
        if current_task and current_task != original_prompt:
            combined_prompt += f"\n\nCurrent task: {current_task}"
        # Generate intelligent action objective
        action_objective = await self._generateActionObjective(context, current_task, original_prompt, additional_data)
        placeholders = {
            "USER_PROMPT": combined_prompt,
            "ACTION_OBJECTIVE": action_objective,  # AI-generated intelligent objective
            "AVAILABLE_DOCUMENTS": self._getFullDocumentContext(context),
            "USER_LANGUAGE": self._extractUserLanguage(),
            "AVAILABLE_CONNECTIONS": self._getFullConnectionContext(),
            "PREVIOUS_ACTION_RESULTS": self._getPreviousActionResults(context),
            "LEARNINGS_AND_IMPROVEMENTS": self._getLearningsAndImprovements(context),
            "LATEST_REFINEMENT_FEEDBACK": self._getLatestRefinementFeedback(context),
        }
        # Add additional data if provided (e.g., selected action, action signature)
        if additional_data:
            placeholders.update(additional_data)
        return placeholders
    def _getActionPlanningPlaceholders(self, context: Any) -> Dict[str, str]:
        """Get full context placeholders for action planning."""
        return {
            "USER_PROMPT": self._extractUserPrompt(context),
            "AVAILABLE_DOCUMENTS": self._getFullDocumentContext(context),
            "WORKFLOW_HISTORY": self._getWorkflowHistory(context),
            "AVAILABLE_METHODS": self._getAvailableMethods(),
            "AVAILABLE_CONNECTIONS": self._getFullConnectionContext(),
            "USER_LANGUAGE": self._extractUserLanguage(),
        }
    def _getResultReviewPlaceholders(self, context: Any) -> Dict[str, str]:
        """Get full context placeholders for result review."""
        return {
            "USER_PROMPT": self._extractUserPrompt(context),
            "REVIEW_CONTENT": self._getReviewContent(context),
        }
    def _getMinimalPlaceholders(self, context: Any) -> Dict[str, str]:
        """Get minimal placeholders as fallback."""
        return {
            "USER_PROMPT": self._extractUserPrompt(context),
            "USER_LANGUAGE": self._extractUserLanguage(),
        }
    # Helper methods for extracting different context levels
    def _extractUserPrompt(self, context: Any) -> str:
        """Extract user prompt from context."""
        # Get the current user prompt from services (clean and reliable)
        if self.services and hasattr(self.services, 'currentUserPrompt') and self.services.currentUserPrompt:
            return self.services.currentUserPrompt
        # Fallback to task step objective if no current prompt found
        if hasattr(context, 'task_step') and context.task_step:
            return context.task_step.objective or 'No request specified'
        return 'No request specified'
    def _extractUserLanguage(self) -> str:
        """Extract user language from service."""
        # Defaults to English when no user service is available.
        return self.services.user.language if self.services and self.services.user else 'en'
    def _getMinimalDocumentContext(self, context: Any) -> str:
        """Get minimal document context (counts only) for React plan selection."""
        try:
            if hasattr(context, 'workflow') and context.workflow:
                # Get document count from workflow
                documents = self.services.workflow.getAvailableDocuments(context.workflow)
                if documents and documents != "No documents available":
                    # Count documents by counting docList and docItem references
                    doc_count = documents.count("docList:") + documents.count("docItem:")
                    return f"{doc_count} documents available from previous tasks"
                else:
                    return "No documents available"
            return "No documents available"
        except Exception as e:
            logger.error(f"Error getting minimal document context: {str(e)}")
            return "No documents available"
    def _getFullDocumentContext(self, context: Any) -> str:
        """Get full document context with detailed references for parameter generation."""
        try:
            if hasattr(context, 'workflow') and context.workflow:
                return self.services.workflow.getAvailableDocuments(context.workflow)
            return "No documents available"
        except Exception as e:
            logger.error(f"Error getting full document context: {str(e)}")
            return "No documents available"
    def _getMinimalConnectionContext(self) -> str:
        """Get minimal connection context (count only) for React plan selection."""
        try:
            connections = self.services.workflow.getConnectionReferenceList()
            if connections:
                return f"{len(connections)} connections available"
            return "No connections available"
        except Exception as e:
            logger.error(f"Error getting minimal connection context: {str(e)}")
            return "No connections available"
    def _getFullConnectionContext(self) -> str:
        """Get full connection context with detailed references for parameter generation."""
        try:
            connections = self.services.workflow.getConnectionReferenceList()
            if connections:
                return '\n'.join(f"- {conn}" for conn in connections)
            return "No connections available"
        except Exception as e:
            logger.error(f"Error getting full connection context: {str(e)}")
            return "No connections available"
    def _getWorkflowHistory(self, context: Any) -> str:
        """Get workflow history for task planning."""
        try:
            if hasattr(context, 'workflow') and context.workflow:
                # Local import to avoid a circular import at module load time.
                from modules.workflows.processing.shared.promptFactory import getPreviousRoundContext
                return getPreviousRoundContext(self.services, context.workflow) or "No previous workflow rounds - this is the first round."
            return "No previous workflow rounds - this is the first round."
        except Exception as e:
            logger.error(f"Error getting workflow history: {str(e)}")
            return "No previous workflow rounds - this is the first round."
    def _getAvailableMethods(self) -> str:
        """Get available methods for action selection and planning using compound action names."""
        try:
            from modules.workflows.processing.shared.promptFactory import methods, discoverMethods
            # Get the methods dictionary
            if not methods:
                discoverMethods(self.services)
            # Create a flat JSON format with compound action names for better AI parsing
            available_actions_json = {}
            for methodName, methodInfo in methods.items():
                # Convert MethodAi -> ai, MethodDocument -> document, etc.
                shortName = methodName.replace('Method', '').lower()
                for actionName, actionInfo in methodInfo['actions'].items():
                    # Create compound action name: method.action
                    compoundActionName = f"{shortName}.{actionName}"
                    # Get the action description
                    action_description = actionInfo.get('description', f"Execute {actionName} action")
                    available_actions_json[compoundActionName] = action_description
            return json.dumps(available_actions_json, indent=2, ensure_ascii=False)
        except Exception as e:
            logger.error(f"Error extracting available methods: {str(e)}")
            return json.dumps({}, indent=2, ensure_ascii=False)
    def _getReviewContent(self, context: Any) -> str:
        """Get review content for result validation."""
        try:
            from modules.workflows.processing.shared.promptFactoryPlaceholders import extractReviewContent
            return extractReviewContent(context)
        except Exception as e:
            logger.error(f"Error getting review content: {str(e)}")
            return "No review content available"
    def _getPreviousActionResults(self, context: Any) -> str:
        """Get previous action results for learning context."""
        try:
            if not hasattr(context, 'previous_action_results') or not context.previous_action_results:
                return "No previous actions executed yet"
            results = []
            for i, result in enumerate(context.previous_action_results[-5:], 1):  # Last 5 results
                if hasattr(result, 'resultLabel') and hasattr(result, 'status'):
                    status = "SUCCESS" if result.status == "completed" else "FAILED"
                    results.append(f"Action {i}: {result.resultLabel} - {status}")
                    if hasattr(result, 'error') and result.error:
                        results.append(f"  Error: {result.error}")
            return "\n".join(results) if results else "No previous actions executed yet"
        except Exception as e:
            logger.error(f"Error getting previous action results: {str(e)}")
            return "No previous actions executed yet"
    def _getLearningsAndImprovements(self, context: Any) -> str:
        """Get learnings and improvements from previous actions."""
        try:
            learnings = []
            # Get improvements from context
            if hasattr(context, 'improvements') and context.improvements and isinstance(context.improvements, list):
                learnings.append("IMPROVEMENTS:")
                for improvement in context.improvements[-3:]:  # Last 3 improvements
                    learnings.append(f"- {improvement}")
            # Get failure patterns
            if hasattr(context, 'failure_patterns') and context.failure_patterns and isinstance(context.failure_patterns, list):
                learnings.append("FAILURE PATTERNS TO AVOID:")
                for pattern in context.failure_patterns[-3:]:  # Last 3 patterns
                    learnings.append(f"- {pattern}")
            # Get successful actions
            if hasattr(context, 'successful_actions') and context.successful_actions and isinstance(context.successful_actions, list):
                learnings.append("SUCCESSFUL APPROACHES:")
                for action in context.successful_actions[-3:]:  # Last 3 successful
                    learnings.append(f"- {action}")
            return "\n".join(learnings) if learnings else "No learnings available yet"
        except Exception as e:
            logger.error(f"Error getting learnings and improvements: {str(e)}")
            return "No learnings available yet"
    def _getLatestRefinementFeedback(self, context: Any) -> str:
        """Get the latest refinement feedback to influence next action planning."""
        try:
            if not hasattr(context, 'previous_review_result') or not context.previous_review_result or not isinstance(context.previous_review_result, list):
                return "No previous refinement feedback available"
            # Get the most recent refinement decision
            latest_decision = context.previous_review_result[-1]
            if not isinstance(latest_decision, dict):
                return "No previous refinement feedback available"
            feedback_parts = []
            # Add decision and reason
            decision = latest_decision.get('decision', 'unknown')
            reason = latest_decision.get('reason', 'No reason provided')
            feedback_parts.append(f"Latest Decision: {decision}")
            feedback_parts.append(f"Reason: {reason}")
            # Add any specific feedback or suggestions
            if 'feedback' in latest_decision:
                feedback_parts.append(f"Feedback: {latest_decision['feedback']}")
            if 'suggestions' in latest_decision:
                feedback_parts.append(f"Suggestions: {latest_decision['suggestions']}")
            return "\n".join(feedback_parts)
        except Exception as e:
            logger.error(f"Error getting latest refinement feedback: {str(e)}")
            return "No previous refinement feedback available"
    async def _generateActionObjective(self, context: Any, current_task: str, original_prompt: str, additional_data: Dict[str, Any] = None) -> str:
        """Generate intelligent, context-aware action objective using AI."""
        try:
            # Get the selected action from additional_data
            selected_action = additional_data.get('SELECTED_ACTION', '') if additional_data else ''
            # Build context for AI objective generation
            context_info = {
                "original_prompt": original_prompt,
                "current_task": current_task,
                "selected_action": selected_action,
                "available_documents": self._getFullDocumentContext(context),
                "available_connections": self._getFullConnectionContext(),
                "previous_results": self._getPreviousActionResults(context),
                "learnings": self._getLearningsAndImprovements(context),
                "refinement_feedback": self._getLatestRefinementFeedback(context),
                "user_language": self._extractUserLanguage()
            }
            # Create AI prompt for objective generation
            objective_prompt = f"""Generate a specific, actionable objective for the selected action.
CONTEXT:
- Original User Request: {context_info['original_prompt']}
- Current Task: {context_info['current_task']}
- Selected Action: {context_info['selected_action']}
- Available Documents: {context_info['available_documents']}
- Available Connections: {context_info['available_connections']}
- Previous Action Results: {context_info['previous_results']}
- Learnings and Improvements: {context_info['learnings']}
- Latest Refinement Feedback: {context_info['refinement_feedback']}
- User Language: {context_info['user_language']}
REQUIREMENTS:
1. Create a SPECIFIC objective that tells the action exactly what to accomplish
2. Include relevant details about documents, connections, recipients, etc.
3. Learn from previous attempts and refinement feedback
4. Make it actionable and concrete
5. Focus on the user's actual intent, not just the task description
6. If this is a retry, incorporate learnings from previous failures
RESPONSE FORMAT:
Return ONLY the objective text, no explanations or formatting.
OBJECTIVE:"""
            # Call AI to generate the objective
            if self.services and hasattr(self.services, 'ai'):
                from modules.datamodels.datamodelAi import AiCallOptions, OperationType, Priority, ProcessingMode
                options = AiCallOptions(
                    operationType=OperationType.ANALYSE_CONTENT,
                    priority=Priority.BALANCED,
                    compressPrompt=False,
                    compressContext=False,
                    processingMode=ProcessingMode.ADVANCED,
                    maxCost=0.01,
                    maxProcessingTime=10
                )
                response = await self.services.ai.callAi(
                    prompt=objective_prompt,
                    placeholders={},
                    options=options
                )
                # Extract objective from response
                if response and response.strip():
                    return response.strip()
            # Fallback to current task if AI fails
            return current_task or original_prompt
        except Exception as e:
            logger.error(f"Error generating action objective: {str(e)}")
            # Fallback to current task
            return current_task or original_prompt

View file

@ -58,7 +58,7 @@ class TaskExecutionState:
patterns.append("permission_issues")
return list(set(patterns))
def should_continue(observation, review=None, current_step: int = 0, max_steps: int = 5) -> bool:
def shouldContinue(observation, review=None, current_step: int = 0, max_steps: int = 5) -> bool:
"""Helper to decide if the iterative loop should continue
- Stop if review indicates 'stop' or success criteria are met
- Stop on failure with no retry path

View file

@ -0,0 +1,130 @@
# methodDiscovery.py
# Method discovery and management for workflow execution
import json
import logging
import importlib
import pkgutil
import inspect
from typing import Any, Dict, List
from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
from modules.workflows.methods.methodBase import MethodBase
# Set up logger
logger = logging.getLogger(__name__)
# Global methods catalog - moved from serviceCenter
methods = {}
def discoverMethods(serviceCenter):
    """Dynamically discover all method classes and their actions in modules methods package"""
    try:
        # Import the methods package
        methodsPackage = importlib.import_module('modules.workflows.methods')
        # Discover all modules in the package
        for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
            if not isPkg and name.startswith('method'):
                try:
                    # Import the module
                    module = importlib.import_module(f'modules.workflows.methods.{name}')
                    # Find all classes in the module that inherit from MethodBase
                    for itemName, item in inspect.getmembers(module):
                        if (inspect.isclass(item) and
                            issubclass(item, MethodBase) and
                            item != MethodBase):
                            # Instantiate the method
                            methodInstance = item(serviceCenter)
                            # Use the actions property from MethodBase which handles @action decorator
                            actions = methodInstance.actions
                            # Create method info
                            methodInfo = {
                                'instance': methodInstance,
                                'actions': actions,
                                'description': item.__doc__ or f"Method {itemName}"
                            }
                            # Store the method with full class name
                            methods[itemName] = methodInfo
                            # Also store with short name for action executor access
                            # NOTE(review): the same methodInfo dict is registered under BOTH
                            # the full class name and the short alias, so iterating `methods`
                            # visits each method twice — consumers must de-duplicate.
                            shortName = itemName.replace('Method', '').lower()
                            methods[shortName] = methodInfo
                            logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
                except Exception as e:
                    # A broken method module must not abort discovery of the others.
                    logger.error(f"Error discovering method {name}: {str(e)}")
                    continue
        logger.info(f"Discovered {len(methods)} method entries total")
    except Exception as e:
        logger.error(f"Error discovering methods: {str(e)}")
def getMethodsList(serviceCenter):
    """Return a human-readable catalog of available methods and their action signatures.

    Parameters:
        serviceCenter: Service center passed to discoverMethods() when the global
            catalog has not been populated yet.
    Returns:
        str: One "**MethodName**: description" section per method, each followed by
        "- action(paramName: type, ...): description" lines; sections separated by
        blank lines.
    """
    if not methods:
        discoverMethods(serviceCenter)
    methodsList = []
    # discoverMethods() registers every method under BOTH its full class name and a
    # short alias pointing at the same info dict; de-duplicate by identity so each
    # method is listed only once (full names are inserted first, so they win).
    seen = set()
    for methodName, methodInfo in methods.items():
        if id(methodInfo) in seen:
            continue
        seen.add(id(methodInfo))
        methodDescription = methodInfo['description']
        actionsList = []
        for actionName, actionInfo in methodInfo['actions'].items():
            actionDescription = actionInfo['description']
            parameters = actionInfo['parameters']
            # Build parameter signature
            paramSig = []
            for paramName, paramInfo in parameters.items():
                paramType = paramInfo['type']
                paramRequired = paramInfo['required']
                paramDefault = paramInfo['default']
                if paramRequired:
                    paramSig.append(f"{paramName}: {paramType}")
                else:
                    # Optional parameters show their default (or "= None" when unset)
                    defaultStr = f" = {paramDefault}" if paramDefault is not None else " = None"
                    paramSig.append(f"{paramName}: {paramType}{defaultStr}")
            paramSignature = f"({', '.join(paramSig)})" if paramSig else "()"
            actionsList.append(f"- {actionName}{paramSignature}: {actionDescription}")
        actionsStr = "\n".join(actionsList)
        methodsList.append(f"**{methodName}**: {methodDescription}\n{actionsStr}")
    return "\n\n".join(methodsList)
def getActionParameterSignature(methodName: str, actionName: str, methods: Dict[str, Any]) -> str:
    """Get action parameter signature from method docstring for AI parameter generation"""
    try:
        if methodName not in (methods or {}):
            return ""
        instance = methods[methodName]['instance']
        if actionName not in instance.actions:
            return ""
        # Parameter details are parsed out of the action's docstring/description text.
        docstring = instance.actions[actionName].get('description', '')
        descriptions, types = instance._extractParameterDetails(docstring)
        lines = []
        for name, desc in descriptions.items():
            typeName = types.get(name, 'Any')
            suffix = f": {desc}" if desc else ""
            lines.append(f"- {name} ({typeName}){suffix}")
        return "Required parameters:\n" + "\n".join(lines)
    except Exception as e:
        logger.error(f"Error getting action parameter signature for {methodName}.{actionName}: {str(e)}")
        return ""

View file

@ -0,0 +1,633 @@
"""
Placeholder Factory
Centralized placeholder extraction functions for all workflow modes.
Each function corresponds to a {{KEY:PLACEHOLDER_NAME}} in prompt templates.
NAMING CONVENTION:
- All functions follow pattern: extract{PlaceholderName}()
- Placeholder names are in UPPER_CASE with underscores
- Function names are in camelCase
MAPPING TABLE:
{{KEY:USER_PROMPT}} -> extractUserPrompt()
{{KEY:AVAILABLE_DOCUMENTS}} -> extractAvailableDocuments()
{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory()
{{KEY:AVAILABLE_METHODS}} -> extractAvailableMethods()
{{KEY:AVAILABLE_CONNECTIONS}} -> extractAvailableConnections()
{{KEY:USER_LANGUAGE}} -> extractUserLanguage()
{{KEY:REVIEW_CONTENT}} -> extractReviewContent()
{{KEY:ACTION_OBJECTIVE}} -> extractActionObjective()
{{KEY:PREVIOUS_ACTION_RESULTS}} -> extractPreviousActionResults()
{{KEY:LEARNINGS_AND_IMPROVEMENTS}} -> extractLearningsAndImprovements()
{{KEY:LATEST_REFINEMENT_FEEDBACK}} -> extractLatestRefinementFeedback()
{{KEY:SELECTED_ACTION}} -> extractSelectedAction()
{{KEY:ACTION_SIGNATURE}} -> extractActionSignature()
{{KEY:ENHANCED_DOCUMENTS}} -> extractEnhancedDocumentContext()
"""
import json
import logging
from typing import Dict, Any, List
from modules.datamodels.datamodelChat import ChatDocument
logger = logging.getLogger(__name__)
from modules.workflows.processing.shared.methodDiscovery import (
getAvailableDocuments,
getMethodsList,
methods,
discoverMethods
)
# ============================================================================
# CORE PLACEHOLDER EXTRACTION FUNCTIONS
# ============================================================================
def extractUserPrompt(context: Any) -> str:
    """Extract user prompt from context. Maps to {{KEY:USER_PROMPT}}"""
    task_step = getattr(context, 'task_step', None)
    if task_step:
        # An empty objective falls back to the default message.
        return task_step.objective or 'No request specified'
    return 'No request specified'
def extractAvailableDocuments(context: Any) -> str:
    """Extract available documents from context. Maps to {{KEY:AVAILABLE_DOCUMENTS}}"""
    docs = getattr(context, 'available_documents', None)
    return docs if docs else "No documents available"
def extractWorkflowHistory(service: Any, context: Any) -> str:
    """Extract workflow history from context. Maps to {{KEY:WORKFLOW_HISTORY}}"""
    fallback = "No previous workflow rounds - this is the first round."
    workflow = getattr(context, 'workflow', None)
    if not workflow:
        return fallback
    return getPreviousRoundContext(service, workflow) or fallback
def extractAvailableMethods(service: Any) -> str:
    """Extract available methods for action planning. Maps to {{KEY:AVAILABLE_METHODS}}"""
    try:
        # Lazily populate the global catalog on first use.
        if not methods:
            discoverMethods(service)
        # A flat {"method.action": description} JSON object is easier for the
        # model to parse than a nested structure.
        catalog = {}
        for methodName, methodInfo in methods.items():
            # MethodAi -> ai, MethodDocument -> document, etc.
            prefix = methodName.replace('Method', '').lower()
            for actionName, actionInfo in methodInfo['actions'].items():
                description = actionInfo.get('description', f"Execute {actionName} action")
                catalog[f"{prefix}.{actionName}"] = description
        return json.dumps(catalog, indent=2, ensure_ascii=False)
    except Exception as e:
        logger.error(f"Error extracting available methods: {str(e)}")
        return json.dumps({}, indent=2, ensure_ascii=False)
def extractUserLanguage(service: Any) -> str:
    """Extract the user's language code from the service. Maps to {{KEY:USER_LANGUAGE}}"""
    if not service or not service.user:
        return 'en'
    return service.user.language
def extractAvailableConnections(service: Any) -> str:
    """Extract available connections as a bullet list. Maps to {{KEY:AVAILABLE_CONNECTIONS}}"""
    fallback = "No connections available"
    try:
        references = getConnectionReferenceList(service)
        if not references:
            return fallback
        return '\n'.join(f"- {ref}" for ref in references)
    except Exception as e:
        logger.error(f"Error extracting available connections: {str(e)}")
        return fallback
def getConnectionReferenceList(services) -> List[str]:
    """Return reference strings for the current user's connections.

    Each reference has the form ``conn_{authority}_{id}``; an empty list is
    returned when the service container or the user has no connections.
    """
    try:
        if hasattr(services, 'interfaceDbApp') and hasattr(services, 'user'):
            connections = services.interfaceDbApp.getUserConnections(services.user.id)
            if connections:
                return [f"conn_{conn.authority.value}_{conn.id}" for conn in connections]
        return []
    except Exception as e:
        logger.error(f"Error getting connection reference list: {str(e)}")
        return []
def getPreviousRoundContext(services, context: Any) -> str:
    """Summarise the outcomes of previous workflow rounds for prompt context.

    NOTE(review): the ``services`` parameter is currently unused — kept for
    signature compatibility with the other extractors.
    """
    no_context = "No previous round context available"
    try:
        if not context or not getattr(context, 'workflow_id', None):
            return no_context
        previous = getattr(context, 'previous_results', [])
        if not previous:
            return no_context
        lines = []
        for index, result in enumerate(previous, 1):
            if hasattr(result, 'success') and hasattr(result, 'resultLabel'):
                outcome = "Success" if result.success else "Failed"
                lines.append(f"{index}. {result.resultLabel} - {outcome}")
            elif isinstance(result, dict):
                outcome = "Success" if result.get('success', False) else "Failed"
                lines.append(f"{index}. {result.get('resultLabel', 'Unknown')} - {outcome}")
            else:
                lines.append(f"{index}. {str(result)}")
        return "\n".join(lines) if lines else no_context
    except Exception as e:
        logger.error(f"Error getting previous round context: {str(e)}")
        return "Error retrieving previous round context"
def extractReviewContent(context: Any) -> str:
    """Extract review content for result validation. Maps to {{KEY:REVIEW_CONTENT}}

    Three sources are tried in priority order:
    1. ``context.action_results`` — a per-result summary with document metadata
       (content is intentionally excluded to keep the prompt small);
    2. ``context.observation`` — dumped as JSON, with document snippets in
       ``previews`` replaced by a character-count placeholder;
    3. ``context.step_result['observation']`` — same treatment as (2).
    Returns a "No review content available" message when nothing matches.
    """
    try:
        if hasattr(context, 'action_results') and context.action_results:
            # Build result summary
            result_summary = ""
            for i, result in enumerate(context.action_results):
                result_summary += f"\nRESULT {i+1}:\n"
                result_summary += f" Success: {result.success}\n"
                if result.error:
                    result_summary += f" Error: {result.error}\n"
                if result.documents:
                    result_summary += f" Documents: {len(result.documents)} document(s)\n"
                    for doc in result.documents:
                        # Extract all available metadata without content
                        doc_metadata = {
                            "name": getattr(doc, 'documentName', 'Unknown'),
                            "mimeType": getattr(doc, 'mimeType', 'Unknown'),
                            "size": getattr(doc, 'size', 'Unknown'),
                            "created": getattr(doc, 'created', 'Unknown'),
                            "modified": getattr(doc, 'modified', 'Unknown'),
                            "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
                            "documentId": getattr(doc, 'documentId', 'Unknown'),
                            "reference": getattr(doc, 'reference', 'Unknown')
                        }
                        # Remove 'Unknown' values to keep it clean
                        doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
                        result_summary += f" - {json.dumps(doc_metadata, indent=6, ensure_ascii=False)}\n"
                else:
                    result_summary += f" Documents: None\n"
            return result_summary
        elif hasattr(context, 'observation') and context.observation:
            # For observation data, show full content but handle documents specially
            if isinstance(context.observation, dict):
                # Create a copy to modify
                obs_copy = context.observation.copy()
                # If there are previews with documents, show only metadata
                # NOTE(review): .copy() is shallow — replacing 'snippet' below also
                # mutates the original preview dicts in context.observation; confirm
                # that callers do not reuse the snippets afterwards.
                if 'previews' in obs_copy and isinstance(obs_copy['previews'], list):
                    for preview in obs_copy['previews']:
                        if isinstance(preview, dict) and 'snippet' in preview:
                            # Replace snippet with metadata indicator
                            preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
                return json.dumps(obs_copy, indent=2, ensure_ascii=False)
            else:
                return json.dumps(context.observation, ensure_ascii=False)
        elif hasattr(context, 'step_result') and context.step_result and 'observation' in context.step_result:
            # For observation data in step_result, show full content but handle documents specially
            observation = context.step_result['observation']
            if isinstance(observation, dict):
                # Create a copy to modify (shallow — same caveat as above)
                obs_copy = observation.copy()
                # If there are previews with documents, show only metadata
                if 'previews' in obs_copy and isinstance(obs_copy['previews'], list):
                    for preview in obs_copy['previews']:
                        if isinstance(preview, dict) and 'snippet' in preview:
                            # Replace snippet with metadata indicator
                            preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
                return json.dumps(obs_copy, indent=2, ensure_ascii=False)
            else:
                return json.dumps(observation, ensure_ascii=False)
        else:
            return "No review content available"
    except Exception as e:
        logger.error(f"Error extracting review content: {str(e)}")
        return "No review content available"
# ============================================================================
# REACT MODE SPECIFIC PLACEHOLDERS
# ============================================================================
def extractActionObjective(context: Any, current_task: str, original_prompt: str, additional_data: Dict[str, Any] = None) -> str:
    """Extract action objective for React mode. Maps to {{KEY:ACTION_OBJECTIVE}}

    Placeholder only: the AI-backed implementation lives in the React-specific
    placeholder factory; here we simply fall back to the task or prompt text.
    """
    return current_task if current_task else original_prompt
def extractPreviousActionResults(context: Any) -> str:
    """Summarise up to the last five executed actions. Maps to {{KEY:PREVIOUS_ACTION_RESULTS}}"""
    nothing_yet = "No previous actions executed yet"
    try:
        history = getattr(context, 'previous_action_results', None)
        if not history:
            return nothing_yet
        lines = []
        # Only the five most recent results are relevant for learning context
        for index, result in enumerate(history[-5:], 1):
            if hasattr(result, 'resultLabel') and hasattr(result, 'status'):
                outcome = "SUCCESS" if result.status == "completed" else "FAILED"
                lines.append(f"Action {index}: {result.resultLabel} - {outcome}")
                if hasattr(result, 'error') and result.error:
                    lines.append(f" Error: {result.error}")
        return "\n".join(lines) if lines else nothing_yet
    except Exception as e:
        logger.error(f"Error extracting previous action results: {str(e)}")
        return nothing_yet
def extractLearningsAndImprovements(context: Any) -> str:
    """Collect improvements, failure patterns and successes. Maps to {{KEY:LEARNINGS_AND_IMPROVEMENTS}}"""
    try:
        sections = []
        # Each source contributes at most its three most recent entries
        improvements = getattr(context, 'improvements', None)
        if improvements and isinstance(improvements, list):
            sections.append("IMPROVEMENTS:")
            sections.extend(f"- {item}" for item in improvements[-3:])
        patterns = getattr(context, 'failure_patterns', None)
        if patterns and isinstance(patterns, list):
            sections.append("FAILURE PATTERNS TO AVOID:")
            sections.extend(f"- {item}" for item in patterns[-3:])
        successes = getattr(context, 'successful_actions', None)
        if successes and isinstance(successes, list):
            sections.append("SUCCESSFUL APPROACHES:")
            sections.extend(f"- {item}" for item in successes[-3:])
        return "\n".join(sections) if sections else "No learnings available yet"
    except Exception as e:
        logger.error(f"Error extracting learnings and improvements: {str(e)}")
        return "No learnings available yet"
def extractLatestRefinementFeedback(context: Any) -> str:
    """Format the most recent refinement decision. Maps to {{KEY:LATEST_REFINEMENT_FEEDBACK}}"""
    unavailable = "No previous refinement feedback available"
    try:
        reviews = getattr(context, 'previous_review_result', None)
        if not reviews or not isinstance(reviews, list):
            return unavailable
        latest = reviews[-1]
        if not isinstance(latest, dict):
            return unavailable
        parts = [
            f"Latest Decision: {latest.get('decision', 'unknown')}",
            f"Reason: {latest.get('reason', 'No reason provided')}",
        ]
        # Optional detail fields, appended only when present
        if 'feedback' in latest:
            parts.append(f"Feedback: {latest['feedback']}")
        if 'suggestions' in latest:
            parts.append(f"Suggestions: {latest['suggestions']}")
        return "\n".join(parts)
    except Exception as e:
        logger.error(f"Error extracting latest refinement feedback: {str(e)}")
        return unavailable
def extractSelectedAction(additional_data: Dict[str, Any]) -> str:
    """Extract selected action from additional data. Maps to {{KEY:SELECTED_ACTION}}"""
    if not additional_data:
        return ''
    return additional_data.get('SELECTED_ACTION', '')
def extractActionSignature(additional_data: Dict[str, Any]) -> str:
    """Extract action signature from additional data. Maps to {{KEY:ACTION_SIGNATURE}}"""
    if not additional_data:
        return ''
    return additional_data.get('ACTION_SIGNATURE', '')
# ============================================================================
# CONTEXT-AWARE PLACEHOLDER FUNCTIONS (for React mode)
# ============================================================================
def extractMinimalDocumentContext(service: Any, context: Any) -> str:
    """Return only a document count for React plan selection (keeps the prompt small)."""
    no_docs = "No documents available"
    try:
        workflow = getattr(context, 'workflow', None)
        if not workflow:
            return no_docs
        documents = service.workflow.getAvailableDocuments(workflow)
        if not documents or documents == no_docs:
            return no_docs
        # Every document reference appears as either "docList:" or "docItem:"
        count = documents.count("docList:") + documents.count("docItem:")
        return f"{count} documents available from previous tasks"
    except Exception as e:
        logger.error(f"Error getting minimal document context: {str(e)}")
        return no_docs
def extractFullDocumentContext(service: Any, context: Any) -> str:
    """Return the full document reference listing for parameter generation."""
    try:
        workflow = getattr(context, 'workflow', None)
        if not workflow:
            return "No documents available"
        return service.workflow.getAvailableDocuments(workflow)
    except Exception as e:
        logger.error(f"Error getting full document context: {str(e)}")
        return "No documents available"
def extractMinimalConnectionContext(service: Any) -> str:
    """Return only a connection count for React plan selection."""
    try:
        references = getConnectionReferenceList(service)
        if not references:
            return "No connections available"
        return f"{len(references)} connections available"
    except Exception as e:
        logger.error(f"Error getting minimal connection context: {str(e)}")
        return "No connections available"
def extractFullConnectionContext(service: Any) -> str:
    """Return the full connection reference list as bullets for parameter generation."""
    try:
        references = getConnectionReferenceList(service)
        if not references:
            return "No connections available"
        return '\n'.join(f"- {ref}" for ref in references)
    except Exception as e:
        logger.error(f"Error getting full connection context: {str(e)}")
        return "No connections available"
def extractUserPromptFromService(service: Any) -> str:
    """Extract the current user prompt held on the service container."""
    prompt = getattr(service, 'currentUserPrompt', None) if service else None
    # Empty/absent prompt falls back to the default message
    return prompt if prompt else 'No request specified'
def extractUserLanguageFromService(service: Any) -> str:
    """Extract the user's language code from the service container."""
    if not service or not service.user:
        return 'en'
    return service.user.language
# ============================================================================
# ADDITIONAL PLACEHOLDER EXTRACTION FUNCTIONS (moved from methodDiscovery.py)
# ============================================================================
def extractAvailableDocumentsFromList(context: Any) -> str:
    """Format context.available_documents as a numbered markdown list.

    Maps to {{KEY:AVAILABLE_DOCUMENTS}} (alternative implementation). Accepts
    ChatDocument instances, plain dicts, or anything stringifiable.
    """
    try:
        documents = getattr(context, 'available_documents', None)
        if not documents or not isinstance(documents, list):
            return "No documents available"
        lines = []
        for index, doc in enumerate(documents, 1):
            if isinstance(doc, ChatDocument):
                entry = f"{index}. **{doc.fileName}**"
                if getattr(doc, 'mimeType', None):
                    entry += f" ({doc.mimeType})"
                if getattr(doc, 'size', None):
                    entry += f" - {doc.size} bytes"
            elif isinstance(doc, dict):
                entry = f"{index}. **{doc.get('fileName', 'Unknown')}**"
                if doc.get('mimeType'):
                    entry += f" ({doc['mimeType']})"
                if doc.get('size'):
                    entry += f" - {doc['size']} bytes"
            else:
                entry = f"{index}. {str(doc)}"
            lines.append(entry)
        return "\n".join(lines) if lines else "No documents available"
    except Exception as e:
        logger.error(f"Error getting available documents: {str(e)}")
        return "Error retrieving documents"
def extractWorkflowHistoryFromMessages(services: Any, context: Any) -> str:
    """Render recent workflow messages as history. Maps to {{KEY:WORKFLOW_HISTORY}} (alternative implementation)."""
    unavailable = "No workflow history available"
    try:
        workflow_id = getattr(context, 'workflow_id', None) if context else None
        if not workflow_id:
            return unavailable
        messages = services.interfaceDbChat.getWorkflowMessages(workflow_id)
        if not messages:
            return unavailable
        lines = []
        # Only the last 10 messages are included; long texts are truncated
        for msg in messages[-10:]:
            if hasattr(msg, 'role') and hasattr(msg, 'message'):
                speaker = "User" if msg.role == "user" else "Assistant"
                text = msg.message if len(msg.message) <= 200 else msg.message[:200] + "..."
                lines.append(f"**{speaker}**: {text}")
        return "\n".join(lines) if lines else unavailable
    except Exception as e:
        logger.error(f"Error getting workflow history: {str(e)}")
        return "Error retrieving workflow history"
def extractAvailableMethodsFromList(services: Any) -> str:
    """Return the formatted methods list. Maps to {{KEY:AVAILABLE_METHODS}} (alternative implementation)."""
    try:
        # Populate the global methods catalog lazily on first use
        if not methods:
            discoverMethods(services)
        return getMethodsList(services)
    except Exception as exc:
        logger.error(f"Error getting available methods: {str(exc)}")
        return "Error retrieving available methods"
def extractUserLanguageFromServices(services: Any) -> str:
    """Return the user's language code, defaulting to English. Maps to {{KEY:USER_LANGUAGE}} (alternative implementation)."""
    try:
        user = getattr(services, 'user', None)
        language = getattr(user, 'language', None) if user is not None else None
        return language or 'en'
    except Exception as e:
        logger.error(f"Error getting user language: {str(e)}")
        return 'en'
def extractReviewContentFromObservation(context: Any) -> str:
    """Build a textual review summary from an observation dict. Maps to {{KEY:REVIEW_CONTENT}} (alternative implementation)."""
    try:
        observation = getattr(context, 'observation', None) if context else None
        if not isinstance(observation, dict):
            return "No review content available"
        parts = []
        if 'success' in observation:
            parts.append(f"Success: {observation['success']}")
        if 'documentsCount' in observation:
            parts.append(f"Documents generated: {observation['documentsCount']}")
        if observation.get('previews'):
            parts.append("Document previews:")
            for preview in observation['previews']:
                if isinstance(preview, dict):
                    name = preview.get('name', 'Unknown')
                    mimeType = preview.get('mimeType', 'Unknown')
                    size = preview.get('contentSize', 'Unknown size')
                    parts.append(f" - {name} ({mimeType}) - {size}")
        if observation.get('notes'):
            parts.append("Notes:")
            parts.extend(f" - {note}" for note in observation['notes'])
        return "\n".join(parts) if parts else "No review content available"
    except Exception as e:
        logger.error(f"Error getting review content: {str(e)}")
        return "Error retrieving review content"
def extractEnhancedDocumentContext(services: Any) -> str:
    """Build a grouped document listing with full metadata. Maps to {{KEY:ENHANCED_DOCUMENTS}}

    Documents from all workflow messages are grouped by a
    ``round{r}_task{t}_action{a}_{label}`` key, and each group is emitted with a
    ``docList:{messageId}:{groupKey}`` reference so the AI can address the whole
    group. The original implementation re-derived every group key in a nested
    loop just to recover the message id (O(n^2) over messages); this version
    records the first matching message id during the single grouping pass,
    which preserves the original output exactly.
    """
    try:
        workflow = getattr(services, 'currentWorkflow', None)
        if not workflow or not hasattr(workflow, 'id'):
            return "No workflow context available"
        if not hasattr(workflow, 'messages') or not workflow.messages:
            return "No documents available"
        # Single pass: group documents by round/task/action/label and remember
        # the first message id that produced each key (for the docList reference)
        docGroups: Dict[str, List[Any]] = {}
        groupMessageIds: Dict[str, str] = {}
        for message in workflow.messages:
            if not (hasattr(message, 'documents') and message.documents):
                continue
            round_num = getattr(message, 'roundNumber', 0)
            task_num = getattr(message, 'taskNumber', 0)
            action_num = getattr(message, 'actionNumber', 0)
            label = getattr(message, 'documentsLabel', 'results')
            group_key = f"round{round_num}_task{task_num}_action{action_num}_{label}"
            docGroups.setdefault(group_key, []).extend(message.documents)
            groupMessageIds.setdefault(group_key, str(message.id))
        if not docGroups:
            return "No documents available"
        docList = []
        for group_key, group_docs in docGroups.items():
            message_id = groupMessageIds.get(group_key)
            # Fallback to the bare label reference if no message id was recorded
            docListRef = f"docList:{message_id}:{group_key}" if message_id else group_key
            docList.append(f"\n**{group_key}:**")
            docList.append(f"Reference: {docListRef}")
            for i, doc in enumerate(group_docs, 1):
                if isinstance(doc, ChatDocument):
                    docInfo = f" {i}. **{doc.fileName}**"
                    if getattr(doc, 'mimeType', None):
                        docInfo += f" ({doc.mimeType})"
                    if getattr(doc, 'size', None):
                        docInfo += f" - {doc.size} bytes"
                    if getattr(doc, 'created', None):
                        docInfo += f" - Created: {doc.created}"
                elif isinstance(doc, dict):
                    docInfo = f" {i}. **{doc.get('fileName', 'Unknown')}**"
                    if doc.get('mimeType'):
                        docInfo += f" ({doc['mimeType']})"
                    if doc.get('size'):
                        docInfo += f" - {doc['size']} bytes"
                    if doc.get('created'):
                        docInfo += f" - Created: {doc['created']}"
                else:
                    docInfo = f" {i}. {str(doc)}"
                docList.append(docInfo)
        return "\n".join(docList) if docList else "No documents available"
    except Exception as e:
        logger.error(f"Error getting enhanced document context: {str(e)}")
        return "Error retrieving document context"

View file

@ -0,0 +1,189 @@
"""
Context-aware placeholder service for different workflow phases.
This module provides different levels of context based on the workflow phase.
"""
import json
import logging
from typing import Dict, Any, Optional
from enum import Enum
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPromptFromService, extractFullDocumentContext,
extractWorkflowHistory, extractUserLanguageFromService,
extractMinimalDocumentContext, extractAvailableMethods,
extractMinimalConnectionContext, extractFullConnectionContext,
extractReviewContent, extractPreviousActionResults,
extractLearningsAndImprovements, extractLatestRefinementFeedback
)
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, Priority, ProcessingMode
logger = logging.getLogger(__name__)
class WorkflowPhase(Enum):
    """Workflow execution phases, each requiring a different level of prompt context."""

    TASK_PLANNING = "task_planning"                  # full context for planning
    REACT_PLAN_SELECTION = "react_plan_selection"    # minimal context for action selection
    REACT_PARAMETERS = "react_parameters"            # full context for parameter generation
    ACTION_PLANNING = "action_planning"              # full context for action planning
    RESULT_REVIEW = "result_review"                  # full context for review
class ContextAwarePlaceholders:
    """Context-aware placeholder service that provides different context levels based on workflow phase."""

    def __init__(self, services):
        # Service container: supplies the user, AI access, and workflow helpers
        # used by the extract* functions below.
        self.services = services

    async def getPlaceholders(self, phase: WorkflowPhase, context: Any, additional_data: Optional[Dict[str, Any]] = None) -> Dict[str, str]:
        """
        Get placeholders based on workflow phase and context.
        Args:
            phase: The workflow phase determining context level
            context: The workflow context object
            additional_data: Additional data for specific phases (e.g., selected action)
        Returns:
            Dictionary of placeholder key-value pairs
        """
        if phase == WorkflowPhase.TASK_PLANNING:
            # Planning needs the full picture: documents plus history.
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "WORKFLOW_HISTORY": extractWorkflowHistory(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }
        elif phase == WorkflowPhase.REACT_PLAN_SELECTION:
            # Action selection keeps the prompt small: counts instead of listings.
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractMinimalDocumentContext(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
                "AVAILABLE_METHODS": extractAvailableMethods(self.services),
                "AVAILABLE_CONNECTIONS": extractMinimalConnectionContext(self.services),
            }
        elif phase == WorkflowPhase.REACT_PARAMETERS:
            # Get both original user prompt and current task objective
            original_prompt = extractUserPromptFromService(self.services)
            current_task = ""
            if hasattr(context, 'task_step') and context.task_step and context.task_step.objective:
                current_task = context.task_step.objective
            # Combine original prompt and current task for better context
            combined_prompt = f"Original request: {original_prompt}"
            if current_task and current_task != original_prompt:
                combined_prompt += f"\n\nCurrent task: {current_task}"
            # Generate intelligent action objective
            action_objective = await self._generateActionObjective(context, current_task, original_prompt, additional_data)
            placeholders = {
                "USER_PROMPT": combined_prompt,
                "ACTION_OBJECTIVE": action_objective,  # AI-generated intelligent objective
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
                "AVAILABLE_CONNECTIONS": extractFullConnectionContext(self.services),
                "PREVIOUS_ACTION_RESULTS": extractPreviousActionResults(context),
                "LEARNINGS_AND_IMPROVEMENTS": extractLearningsAndImprovements(context),
                "LATEST_REFINEMENT_FEEDBACK": extractLatestRefinementFeedback(context),
            }
            # Add additional data if provided (e.g., selected action, action signature)
            if additional_data:
                placeholders.update(additional_data)
            return placeholders
        elif phase == WorkflowPhase.ACTION_PLANNING:
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "AVAILABLE_DOCUMENTS": extractFullDocumentContext(self.services, context),
                "WORKFLOW_HISTORY": extractWorkflowHistory(self.services, context),
                "AVAILABLE_METHODS": extractAvailableMethods(self.services),
                "AVAILABLE_CONNECTIONS": extractFullConnectionContext(self.services),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }
        elif phase == WorkflowPhase.RESULT_REVIEW:
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "REVIEW_CONTENT": extractReviewContent(context),
            }
        else:
            # Unknown phase: return the minimal safe set rather than failing.
            logger.warning(f"Unknown workflow phase: {phase}")
            return {
                "USER_PROMPT": extractUserPromptFromService(self.services),
                "USER_LANGUAGE": extractUserLanguageFromService(self.services),
            }

    async def _generateActionObjective(self, context: Any, current_task: str, original_prompt: str, additional_data: Optional[Dict[str, Any]] = None) -> str:
        """Generate intelligent, context-aware action objective using AI.

        Falls back to ``current_task or original_prompt`` when no AI service is
        available, the AI returns nothing, or any error occurs.
        """
        try:
            # Get the selected action from additional_data
            selected_action = additional_data.get('SELECTED_ACTION', '') if additional_data else ''
            # Build context for AI objective generation
            context_info = {
                "original_prompt": original_prompt,
                "current_task": current_task,
                "selected_action": selected_action,
                "available_documents": extractFullDocumentContext(self.services, context),
                "available_connections": extractFullConnectionContext(self.services),
                "previous_results": extractPreviousActionResults(context),
                "learnings": extractLearningsAndImprovements(context),
                "refinement_feedback": extractLatestRefinementFeedback(context),
                "user_language": extractUserLanguageFromService(self.services)
            }
            # Create AI prompt for objective generation
            objective_prompt = f"""Generate a specific, actionable objective for the selected action.
CONTEXT:
- Original User Request: {context_info['original_prompt']}
- Current Task: {context_info['current_task']}
- Selected Action: {context_info['selected_action']}
- Available Documents: {context_info['available_documents']}
- Available Connections: {context_info['available_connections']}
- Previous Action Results: {context_info['previous_results']}
- Learnings and Improvements: {context_info['learnings']}
- Latest Refinement Feedback: {context_info['refinement_feedback']}
- User Language: {context_info['user_language']}
REQUIREMENTS:
1. Create a SPECIFIC objective that tells the action exactly what to accomplish
2. Include relevant details about documents, connections, recipients, etc.
3. Learn from previous attempts and refinement feedback
4. Make it actionable and concrete
5. Focus on the user's actual intent, not just the task description
6. If this is a retry, incorporate learnings from previous failures
RESPONSE FORMAT:
Return ONLY the objective text, no explanations or formatting.
OBJECTIVE:"""
            # Call AI to generate the objective
            if self.services and hasattr(self.services, 'ai'):
                options = AiCallOptions(
                    operationType=OperationType.ANALYSE_CONTENT,
                    priority=Priority.BALANCED,
                    compressPrompt=False,
                    compressContext=False,
                    processingMode=ProcessingMode.ADVANCED,
                    # Budget caps for this auxiliary call
                    maxCost=0.01,
                    maxProcessingTime=10
                )
                response = await self.services.ai.callAi(
                    prompt=objective_prompt,
                    placeholders={},
                    options=options
                )
                # Extract objective from response
                # NOTE(review): assumes callAi returns a plain string — confirm contract
                if response and response.strip():
                    return response.strip()
            # Fallback to current task if AI fails
            return current_task or original_prompt
        except Exception as e:
            logger.error(f"Error generating action objective: {str(e)}")
            # Fallback to current task
            return current_task or original_prompt

View file

@ -1,371 +0,0 @@
# promptFactory.py
# Enhanced prompt factory with reusable functions
import json
import logging
import importlib
import pkgutil
import inspect
from typing import Any, Dict, List
from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
from modules.datamodels.datamodelChat import ChatDocument
from modules.services.serviceGeneration.subDocumentUtility import getFileExtension
from modules.workflows.methods.methodBase import MethodBase
# Set up logger
logger = logging.getLogger(__name__)
# Global methods catalog - moved from serviceCenter
methods = {}
def discoverMethods(serviceCenter):
    """Dynamically discover all method classes and their actions in modules methods package.

    Populates the global ``methods`` catalog under both the full class name
    (e.g. ``MethodAi``) and a lowercase short name (``ai``); both keys share
    the same info dict.
    """
    try:
        methodsPackage = importlib.import_module('modules.workflows.methods')
        for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
            # Only plain modules named method* are candidates
            if isPkg or not name.startswith('method'):
                continue
            try:
                module = importlib.import_module(f'modules.workflows.methods.{name}')
                for itemName, item in inspect.getmembers(module):
                    if not (inspect.isclass(item)
                            and issubclass(item, MethodBase)
                            and item is not MethodBase):
                        continue
                    instance = item(serviceCenter)
                    # MethodBase.actions resolves everything registered via @action
                    actions = instance.actions
                    methodInfo = {
                        'instance': instance,
                        'actions': actions,
                        'description': item.__doc__ or f"Method {itemName}"
                    }
                    methods[itemName] = methodInfo
                    # Short alias (MethodAi -> ai) for action-executor lookups
                    shortName = itemName.replace('Method', '').lower()
                    methods[shortName] = methodInfo
                    logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
            except Exception as e:
                logger.error(f"Error discovering method {name}: {str(e)}")
                continue
        logger.info(f"Discovered {len(methods)} method entries total")
    except Exception as e:
        logger.error(f"Error discovering methods: {str(e)}")
def getMethodsList(serviceCenter):
    """Return a human-readable catalog of all methods and their action signatures."""
    if not methods:
        discoverMethods(serviceCenter)
    sections = []
    for methodName, methodInfo in methods.items():
        actionLines = []
        for actionName, actionInfo in methodInfo['actions'].items():
            # Build "name: type[ = default]" fragments for each parameter
            fragments = []
            for paramName, paramInfo in actionInfo['parameters'].items():
                if paramInfo['required']:
                    fragments.append(f"{paramName}: {paramInfo['type']}")
                else:
                    default = paramInfo['default']
                    suffix = f" = {default}" if default is not None else " = None"
                    fragments.append(f"{paramName}: {paramInfo['type']}{suffix}")
            signature = f"({', '.join(fragments)})" if fragments else "()"
            actionLines.append(f"- {actionName}{signature}: {actionInfo['description']}")
        sections.append(f"**{methodName}**: {methodInfo['description']}\n" + "\n".join(actionLines))
    return "\n\n".join(sections)
# Reusable prompt element functions
def getAvailableDocuments(context: Any) -> str:
    """Get available documents for prompt context as a numbered markdown list.

    Accepts ChatDocument instances, plain dicts, or anything stringifiable.
    """
    try:
        documents = getattr(context, 'available_documents', None)
        if not documents or not isinstance(documents, list):
            return "No documents available"
        lines = []
        for index, doc in enumerate(documents, 1):
            if isinstance(doc, ChatDocument):
                entry = f"{index}. **{doc.fileName}**"
                if getattr(doc, 'mimeType', None):
                    entry += f" ({doc.mimeType})"
                if getattr(doc, 'size', None):
                    entry += f" - {doc.size} bytes"
            elif isinstance(doc, dict):
                entry = f"{index}. **{doc.get('fileName', 'Unknown')}**"
                if doc.get('mimeType'):
                    entry += f" ({doc['mimeType']})"
                if doc.get('size'):
                    entry += f" - {doc['size']} bytes"
            else:
                entry = f"{index}. {str(doc)}"
            lines.append(entry)
        return "\n".join(lines) if lines else "No documents available"
    except Exception as e:
        logger.error(f"Error getting available documents: {str(e)}")
        return "Error retrieving documents"
def getWorkflowHistory(services, context: Any) -> str:
    """Summarize the last (up to 10) chat messages of the context's workflow.

    Each message becomes a "**User**: ..." or "**Assistant**: ..." line; any
    message text longer than 200 characters is truncated with an ellipsis.
    Returns a fallback string when there is no workflow or history, or on error.
    """
    try:
        workflowId = getattr(context, 'workflow_id', None) if context else None
        if not workflowId:
            return "No workflow history available"
        messages = services.interfaceDbChat.getWorkflowMessages(workflowId)
        if not messages:
            return "No workflow history available"
        entries = []
        # Only the 10 most recent messages are relevant for prompt context.
        for msg in messages[-10:]:
            if hasattr(msg, 'role') and hasattr(msg, 'message'):
                speaker = "User" if msg.role == "user" else "Assistant"
                text = msg.message
                if len(text) > 200:
                    text = text[:200] + "..."
                entries.append(f"**{speaker}**: {text}")
        return "\n".join(entries) if entries else "No workflow history available"
    except Exception as e:
        logger.error(f"Error getting workflow history: {str(e)}")
        return "Error retrieving workflow history"
def getAvailableMethods(services) -> str:
    """Get available methods for prompt context.

    Lazily triggers discoverMethods() when the module-level `methods` cache
    is empty, then returns the formatted method/action listing produced by
    getMethodsList().  On error, returns a fallback string instead of raising.
    """
    try:
        if not methods:
            discoverMethods(services)
        return getMethodsList(services)
    except Exception as e:
        logger.error(f"Error getting available methods: {str(e)}")
        return "Error retrieving available methods"
def getEnhancedDocumentContext(services) -> str:
    """Build a grouped, metadata-rich listing of all workflow documents.

    Documents are grouped by the (round, task, action, label) coordinates of
    the message that produced them.  Each group is emitted with a docList
    reference (``docList:{messageId}:{groupKey}``) so downstream prompts can
    address the whole group.

    Fix: the id of the first message contributing to each group is recorded
    while grouping, instead of re-scanning all messages once per group
    (previously O(groups x messages)); the unused flat ``all_documents``
    accumulation is gone.  Output is unchanged.

    Returns a fallback string when no workflow or documents are available,
    or on error.
    """
    try:
        workflow = getattr(services, 'currentWorkflow', None)
        if not workflow or not hasattr(workflow, 'id'):
            return "No workflow context available"
        if not getattr(workflow, 'messages', None):
            return "No documents available"
        # Group documents by round/task/action/label and remember which
        # message each group came from.
        docGroups = {}
        groupMessageIds = {}
        for message in workflow.messages:
            if hasattr(message, 'documents') and message.documents:
                round_num = getattr(message, 'roundNumber', 0)
                task_num = getattr(message, 'taskNumber', 0)
                action_num = getattr(message, 'actionNumber', 0)
                label = getattr(message, 'documentsLabel', 'results')
                group_key = f"round{round_num}_task{task_num}_action{action_num}_{label}"
                if group_key not in docGroups:
                    docGroups[group_key] = []
                    # First contributing message defines the docList reference.
                    groupMessageIds[group_key] = str(message.id)
                docGroups[group_key].extend(message.documents)
        if not docGroups:
            return "No documents available"
        docList = []
        for group_key, group_docs in docGroups.items():
            message_id = groupMessageIds.get(group_key)
            # Prefer the full docList reference; fall back to the bare label.
            docListRef = f"docList:{message_id}:{group_key}" if message_id else group_key
            docList.append(f"\n**{group_key}:**")
            docList.append(f"Reference: {docListRef}")
            for i, doc in enumerate(group_docs, 1):
                if isinstance(doc, ChatDocument):
                    docInfo = f" {i}. **{doc.fileName}**"
                    if hasattr(doc, 'mimeType') and doc.mimeType:
                        docInfo += f" ({doc.mimeType})"
                    if hasattr(doc, 'size') and doc.size:
                        docInfo += f" - {doc.size} bytes"
                    if hasattr(doc, 'created') and doc.created:
                        docInfo += f" - Created: {doc.created}"
                    docList.append(docInfo)
                elif isinstance(doc, dict):
                    docInfo = f" {i}. **{doc.get('fileName', 'Unknown')}**"
                    if doc.get('mimeType'):
                        docInfo += f" ({doc['mimeType']})"
                    if doc.get('size'):
                        docInfo += f" - {doc['size']} bytes"
                    if doc.get('created'):
                        docInfo += f" - Created: {doc['created']}"
                    docList.append(docInfo)
                else:
                    docList.append(f" {i}. {str(doc)}")
        return "\n".join(docList) if docList else "No documents available"
    except Exception as e:
        logger.error(f"Error getting enhanced document context: {str(e)}")
        return "Error retrieving document context"
def getConnectionReferenceList(services) -> List[str]:
    """Return the user's connections as reference strings.

    Each reference has the form ``conn_{authority}_{id}``.  Returns an empty
    list when the services object lacks the db/user handles, when the user
    has no connections, or on error.
    """
    try:
        if not (hasattr(services, 'interfaceDbApp') and hasattr(services, 'user')):
            return []
        connections = services.interfaceDbApp.getUserConnections(services.user.id)
        if not connections:
            return []
        return [f"conn_{conn.authority.value}_{conn.id}" for conn in connections]
    except Exception as e:
        logger.error(f"Error getting connection reference list: {str(e)}")
        return []
def getUserLanguage(services) -> str:
    """Return the user's language code, falling back to 'en'.

    'en' is returned when the services object has no user, the user has no
    language attribute, the language is unset, or on any error.
    """
    try:
        return getattr(getattr(services, 'user', None), 'language', None) or 'en'
    except Exception as e:
        logger.error(f"Error getting user language: {str(e)}")
        return 'en'
def getReviewContent(context: Any) -> str:
    """Format the context's observation dict into a readable review summary.

    Emits (when present) the success flag, the generated document count,
    per-document preview lines, and notes.  Returns a fallback string when
    there is no usable observation or on error.
    """
    try:
        observation = getattr(context, 'observation', None) if context else None
        if not isinstance(observation, dict):
            return "No review content available"
        parts = []
        if 'success' in observation:
            parts.append(f"Success: {observation['success']}")
        if 'documentsCount' in observation:
            parts.append(f"Documents generated: {observation['documentsCount']}")
        previews = observation.get('previews')
        if previews:
            parts.append("Document previews:")
            for preview in previews:
                if isinstance(preview, dict):
                    name = preview.get('name', 'Unknown')
                    mimeType = preview.get('mimeType', 'Unknown')
                    size = preview.get('contentSize', 'Unknown size')
                    parts.append(f" - {name} ({mimeType}) - {size}")
        notes = observation.get('notes')
        if notes:
            parts.append("Notes:")
            for note in notes:
                parts.append(f" - {note}")
        return "\n".join(parts) if parts else "No review content available"
    except Exception as e:
        logger.error(f"Error getting review content: {str(e)}")
        return "Error retrieving review content"
def getPreviousRoundContext(services, context: Any) -> str:
    """Summarize prior round results as a numbered Success/Failed list.

    Results may be objects (with ``success``/``resultLabel`` attributes),
    dicts (with the same keys), or anything else (rendered via str()).
    Returns a fallback string when there is no workflow or results, or on
    error.  The ``services`` parameter is kept for signature compatibility.
    """
    try:
        workflowId = getattr(context, 'workflow_id', None) if context else None
        if not workflowId:
            return "No previous round context available"
        previousResults = getattr(context, 'previous_results', [])
        if not previousResults:
            return "No previous round context available"
        lines = []
        for index, result in enumerate(previousResults, 1):
            if hasattr(result, 'success') and hasattr(result, 'resultLabel'):
                status = "Success" if result.success else "Failed"
                lines.append(f"{index}. {result.resultLabel} - {status}")
            elif isinstance(result, dict):
                status = "Success" if result.get('success', False) else "Failed"
                lines.append(f"{index}. {result.get('resultLabel', 'Unknown')} - {status}")
            else:
                lines.append(f"{index}. {str(result)}")
        return "\n".join(lines) if lines else "No previous round context available"
    except Exception as e:
        logger.error(f"Error getting previous round context: {str(e)}")
        return "Error retrieving previous round context"

View file

@ -1,673 +0,0 @@
"""
Placeholder-based prompt factory for dynamic AI calls.
This module provides prompt templates with placeholders that can be filled dynamically.
"""
import json
import logging
from typing import Dict, Any
logger = logging.getLogger(__name__)
from modules.workflows.processing.shared.promptFactory import (
getAvailableDocuments,
getPreviousRoundContext,
getMethodsList,
getEnhancedDocumentContext,
getConnectionReferenceList,
methods,
discoverMethods
)
def createTaskPlanningPromptTemplate() -> str:
    """Return the task-planning prompt template.

    The template asks the model to break a user request into high-level task
    steps and reply with a strict JSON plan.  The {{KEY:...}} placeholders
    (USER_PROMPT, AVAILABLE_DOCUMENTS, WORKFLOW_HISTORY) are substituted by
    the caller before the prompt is sent.
    """
    return """# Task Planning
Break down user requests into logical, executable task steps.
## 📋 Context
### User Request
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### Previous Workflow Rounds
{{KEY:WORKFLOW_HISTORY}}
## 📝 Task Planning Rules
### Strategic Task Grouping
- **GROUP RELATED ACTIONS** - Combine all actions for the same business topic into ONE task
- **ONE TOPIC PER TASK** - Each task should handle one complete business objective
- **HIGH-LEVEL FOCUS** - Plan strategic outcomes, not implementation steps
- **AVOID MICRO-TASKS** - Don't create separate tasks for each small action
### Task Grouping Examples
- **Research + Analysis + Report** ONE task: "Web research report"
- **Data Collection + Processing + Visualization** ONE task: "Collect and present data"
- **Different topics** (email + flowers) SEPARATE tasks: "Send formal email..." + "Order flowers from Fleurop for delivery to 123 Main St, include card message"
### Retry Handling
- **If retry request**: Analyze previous rounds to understand what failed
- **Learn from mistakes**: Improve the plan based on previous failures
## 📊 Required JSON Structure
```json
{
"overview": "Brief description of the overall plan",
"languageUserDetected": "en",
"userMessage": "User-friendly message explaining the task plan",
"tasks": [
{
"id": "task_1",
"objective": "Clear business objective focusing on what to deliver",
"dependencies": ["task_0"],
"success_criteria": ["measurable criteria 1", "measurable criteria 2"],
"estimated_complexity": "low|medium|high",
"userMessage": "What this task will accomplish"
}
]
}
```
## 🎯 Task Structure Guidelines
### Task ID Format
- Use sequential numbering: `task_1`, `task_2`, `task_3`
- Keep IDs simple and clear
### Objective Writing
- **Be VERY SPECIFIC** - Include exact details needed for action planning
- **Include all requirements** - recipient, attachments, format, recipients, etc.
- **State the complete deliverable** - What exactly will be produced
- **Include context and constraints** - When, where, how, with what
- **Make it actionable** - Clear enough to plan specific actions
### Specific Objective Examples
- **Good**: "Send formal email to ceo and board of directors with annual report as attachment"
- **Bad**: "Handle email communication"
- **Good**: "Order flowers from Fleurop for delivery to 123 Main St, include card message 'Happy Birthday', deliver on March 15th"
- **Bad**: "Order flowers"
### Action Planning Requirements
- **Include all necessary details** - The objective must contain everything needed to plan actions
- **Specify recipients and destinations** - Who should receive what
- **Include file names and formats** - What documents to use/create
- **State timing and deadlines** - When things need to be done
- **Include context and constraints** - Any special requirements or limitations
### Success Criteria
- **Make them measurable** - specific, quantifiable outcomes
- **Focus on deliverables** - what the user will receive
- **Keep criteria realistic** - achievable within the task scope
- **Include all related actions** - success means completing the entire business objective
- **Be specific about requirements** - Include exact details like recipients, formats, deadlines
- **State clear completion criteria** - How to know the task is fully done
### Complexity Estimation
- **Low**: Simple, single-action tasks (1-2 actions)
- **Medium**: Multi-action tasks for one topic (3-5 actions)
- **High**: Complex strategic tasks (6+ actions)
## 🚀 Response Format
Return ONLY the JSON object."""
def createActionDefinitionPromptTemplate() -> str:
    """Return the action-definition prompt template.

    The template asks the model to produce the next JSON action list for a
    task.  Placeholders (USER_PROMPT, AVAILABLE_DOCUMENTS, WORKFLOW_HISTORY,
    AVAILABLE_METHODS, AVAILABLE_CONNECTIONS, USER_LANGUAGE) are substituted
    by the caller before the prompt is sent.
    """
    return """# Action Definition
Generate the next action to advance toward completing the task objective.
## 📋 Context
### Task Objective
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### Workflow History
{{KEY:WORKFLOW_HISTORY}}
### Available Methods
{{KEY:AVAILABLE_METHODS}}
### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}
### User Language
{{KEY:USER_LANGUAGE}}
## ⚠️ RULES
### Action Names
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
- **DO NOT separate** method and action names - use the full compound name
### Parameter Guidelines
- **Use exact document references** from AVAILABLE_DOCUMENTS
- **Use exact connection references** from AVAILABLE_CONNECTIONS
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults
## 📊 Required JSON Structure
```json
{
"actions": [
{
"action": "method.action_name",
"parameters": {},
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
"description": "What this action accomplishes",
"userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
}
]
}
```
## ✅ Correct Example
```json
{
"actions": [
{
"action": "document.extract",
"parameters": {"documentList": ["docList:msg_123:results"]},
"resultLabel": "round1_task1_action1_extract_results",
"description": "Extract data from documents",
"userMessage": "Extracting data from documents"
}
]
}
```
## 🎯 Action Planning Guidelines
### Method Selection
- **Choose appropriate method** based on task requirements
- **Consider available resources** (documents, connections)
- **Match method capabilities** to task objectives
### Parameter Design
- **Use ACTION SIGNATURE** to understand required parameters
- **Convert objective** into appropriate parameter values
- **Include all required parameters** for the action
### Result Labeling
- **Use descriptive labels** that explain what the action produces
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
- **Make labels meaningful** for future reference
### User Messages
- **Write in user language** ({{KEY:USER_LANGUAGE}})
- **Explain what's happening** in user-friendly terms
- **Keep messages concise** but informative
## 🚀 Response Format
Return ONLY the JSON object."""
def createActionSelectionPromptTemplate() -> str:
    """Return the action-selection prompt template.

    The template asks the model to pick exactly one compound action name and
    reply with a single-field JSON object.  Placeholders (USER_PROMPT,
    AVAILABLE_DOCUMENTS, USER_LANGUAGE, AVAILABLE_METHODS) are substituted by
    the caller before the prompt is sent.
    """
    return """# Action Selection
Select exactly one action to advance the task.
## 📋 Context
### Objective
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### User Language
{{KEY:USER_LANGUAGE}}
### Available Methods
{{KEY:AVAILABLE_METHODS}}
## ⚠️ CRITICAL RULES
### Selection Requirements
- **Return ONLY the compound action name**
- **Do NOT include parameters or prompts**
- **Use EXACT compound action names** from AVAILABLE_METHODS above
- **DO NOT create** new action names
### Action Format
- **Compound action names**: Use exact names from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **Single field format**: Use the full compound action name as a single string
## 📝 Required JSON Format
```json
{"action":"method.action_name"}
```
## ✅ Correct Examples
```json
{"action":"ai.process"}
{"action":"document.extract"}
{"action":"web.search"}
```
## 🎯 Selection Guidelines
### Choose Appropriate Action
- **Match action to objective** - select the most relevant action
- **Consider available resources** - ensure required documents/connections are available
- **Think about the next step** - what action will advance the task
### Method Selection
- **AI methods**: For processing, analysis, or generation tasks
- **Document methods**: For document operations (extract, generate, etc.)
- **Web methods**: For web searches or external data retrieval
- **Other methods**: Based on specific requirements
## 🚀 Response Format
Return ONLY the JSON object."""
def createActionParameterPromptTemplate() -> str:
    """Return the action-parameter prompt template.

    The template asks the model to generate the exact parameters required by
    a previously selected action, wrapped in a "parameters" JSON object.
    Placeholders (SELECTED_ACTION, USER_PROMPT, AVAILABLE_DOCUMENTS,
    AVAILABLE_CONNECTIONS, USER_LANGUAGE, ACTION_SIGNATURE) are substituted
    by the caller before the prompt is sent.
    """
    return """# Action Parameter Generation
You are an AI assistant tasked with generating parameters for a selected action.
## 🎯 Your Goal
Provide the EXACT parameters required by the ACTION SIGNATURE, using information from the OBJECTIVE, AVAILABLE DOCUMENTS, and AVAILABLE CONNECTIONS.
## ⚠️ CRITICAL RULES
- **MUST respond with a JSON object**
- **All parameters MUST be wrapped in a "parameters" object**
- **ONLY include parameters listed in the ACTION SIGNATURE**
- **Do NOT use code blocks or markdown in your response**
- **Return ONLY the JSON object**
## 📋 Document & Connection References
- **Document references**: Copy the EXACT reference string from AVAILABLE DOCUMENTS (e.g., `docList:msg_UUID:label`)
- **Connection references**: Copy the EXACT reference string from AVAILABLE CONNECTIONS (e.g., `connection:msft:user@domain.com:uuid [status:active, token:valid]`)
- **Do NOT invent, shorten, or modify any references**
- **If unsure**: Use "UNCLEAR_REFERENCE" or "UNCLEAR_OBJECTIVE" and explain in a comment
## 📝 Input Context
### Selected Action
{{KEY:SELECTED_ACTION}}
### Objective
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}
### User Language
{{KEY:USER_LANGUAGE}}
### Action Requirements
{{KEY:ACTION_SIGNATURE}}
## 📚 Reference Types
### Document References
- **docItem**: Reference to a single document (e.g., "docItem:uuid:filename.pdf")
- **docList**: Reference to a group of documents (e.g., "docList:msg_123:AnalysisResults")
- **Use EXACT reference strings** shown in AVAILABLE_DOCUMENTS
### Connection References
- **Use exact connection references** from AVAILABLE CONNECTIONS
- **Examples**: "connection:msft:user@domain.com:uuid [status:active, token:valid]", "connection:sp:user@domain.com:uuid [status:active, token:valid]"
## 💡 Basic Examples
```json
{"parameters":{"aiPrompt": "Summarize the document"}}
{"parameters":{"documentList": ["docList:msg_UUID:label"]}}
{"parameters":{"connectionReference": "connection:msft:user@domain.com:uuid [status:active, token:valid]"}}
```
## ❌ Wrong Format (DO NOT USE)
```json
{"aiPrompt": "Your prompt here"}
```
```json
{"parameters":{"aiPrompt": "Your prompt here"}}
```
## 🎯 Parameter Guidelines
### Required Parameters
- **Use ACTION SIGNATURE** to understand what parameters are required
- **Convert objective** into appropriate parameter values
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults
### Document Reference Rules
- **ONLY use exact document reference strings** from AVAILABLE_DOCUMENTS
- **DO NOT add file paths** or individual filenames to document references
- **For documentList parameters**: Use the EXACT reference strings shown in AVAILABLE_DOCUMENTS
### Connection Reference Rules
- **ONLY use exact connection references** from AVAILABLE CONNECTIONS
- **For connectionReference parameters**: Use the exact connection reference from AVAILABLE CONNECTIONS
## 🚀 Response Format
Return your JSON response immediately after this prompt."""
def createRefinementPromptTemplate() -> str:
    """Return the workflow-refinement prompt template.

    The template asks the model to decide "continue" vs "stop" for the
    current workflow based on the latest observation.  Placeholders
    (USER_PROMPT, REVIEW_CONTENT) are substituted by the caller before the
    prompt is sent.
    """
    return """# Workflow Refinement Decision
Decide the next step based on the observation.
## 📋 Context
### Objective
{{KEY:USER_PROMPT}}
### Observation
{{KEY:REVIEW_CONTENT}}
## ⚠️ CRITICAL RULES
### Data Requirements
- **If user wants DATA** (numbers, lists, calculations): Ensure AI delivers the actual data, not code
- **If user wants DOCUMENTS** (Word, PDF, Excel): Ensure appropriate method is used to create the document
- **If user wants ANALYSIS**: Ensure AI analyzes and delivers insights
- **NEVER accept code when user wants data** - demand the actual data
- **NEVER accept algorithms when user wants results** - demand the actual results
## 🤔 Decision Rules
### Continue Conditions
- The objective is **NOT fulfilled** (user didn't get what they asked for)
- More data or processing is needed
- The current result is incomplete
### Stop Conditions
- The objective is **fulfilled** (user got what they asked for)
- All required data has been delivered
- The task is complete
### Focus
- Focus on what the user actually wants, not what was delivered
- Consider the user's original request carefully
## 📝 Response Format
```json
{"decision":"continue","reason":"Need more data"}
```
### Decision Options
- `"continue"` - Keep working on the objective
- `"stop"` - Objective has been fulfilled
### Reason Examples
- `"Need more data"`
- `"Objective fulfilled"`
- `"User got the requested document"`
- `"Analysis complete"`
## 🎯 Decision Guidelines
### When to Continue
- **Incomplete results** - User didn't get what they asked for
- **Missing data** - Need to gather more information
- **Partial success** - Some but not all requirements met
- **Technical issues** - Action failed and needs retry
### When to Stop
- **Complete success** - User got exactly what they asked for
- **All criteria met** - Success criteria have been achieved
- **Document created** - Required document has been generated
- **Data delivered** - All requested data has been provided
### Quality Assessment
- **Check completeness** - Is the result complete?
- **Verify accuracy** - Is the data correct?
- **Assess usefulness** - Does it meet the user's needs?
- **Consider format** - Is it in the requested format?
## 🚀 Response Format
Return your JSON response immediately after this prompt."""
def createResultReviewPromptTemplate() -> str:
    """Return the result-review/validation prompt template.

    The template asks the model to judge a task's execution results and
    reply with a JSON verdict (success/retry/failed plus quality metadata).
    Placeholders (USER_PROMPT, REVIEW_CONTENT) are substituted by the caller
    before the prompt is sent.
    """
    return """# Result Review & Validation
Review task execution outcomes and determine success, retry needs, or failure.
## 📋 Context
### Task Objective
{{KEY:USER_PROMPT}}
### Execution Results
{{KEY:REVIEW_CONTENT}}
## 🔍 Validation Criteria
### Action Assessment
- **Review each action's success/failure status**
- **Check if required documents were produced**
- **Validate document quality and completeness**
- **Assess if success criteria were met**
- **Identify any missing or incomplete outputs**
### Decision Making
- **Determine if retry would help** or if task should be marked as failed
- **Consider business value** and user satisfaction
- **Evaluate technical execution** and results quality
## 📊 Required JSON Structure
```json
{
"status": "success|retry|failed",
"reason": "Detailed explanation of the validation decision",
"improvements": ["specific improvement 1", "specific improvement 2"],
"quality_score": 8,
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"],
"confidence": 0.85,
"userMessage": "User-friendly message explaining the validation result"
}
```
## 🎯 Validation Principles
### Assessment Approach
- **Be thorough but fair** in assessment
- **Focus on business value** and outcomes
- **Consider both technical execution** and business results
- **Provide specific, actionable** improvement suggestions
### Quality Scoring
- **Use quality scores** to track progress across retries
- **Scale 1-10**: 1 = Poor, 5 = Average, 10 = Excellent
- **Consider completeness, accuracy, and usefulness**
### Criteria Evaluation
- **Clearly identify** which success criteria were met vs. unmet
- **List specific criteria** that were achieved
- **Note missing requirements** that need attention
### Confidence Levels
- **Set appropriate confidence levels** based on evidence quality
- **Scale 0.0-1.0**: 0.0 = No confidence, 1.0 = Complete confidence
- **Consider data quality** and result reliability
## 📝 Status Definitions
### Success
- **All objectives met** - User got what they asked for
- **Quality standards met** - Results are complete and accurate
- **No retry needed** - Task is fully complete
### Retry
- **Partial success** - Some but not all objectives met
- **Improvement possible** - Retry could lead to better results
- **Technical issues** - Action failures that can be resolved
### Failed
- **No progress made** - Objectives not achieved
- **Technical limitations** - Cannot be resolved with retry
- **Resource constraints** - Missing required inputs
## 💡 Improvement Suggestions
### Actionable Improvements
- **Be specific** - Don't just say "improve quality"
- **Focus on process** - How to do better next time
- **Consider resources** - What additional inputs might help
- **Technical fixes** - Address specific technical issues
### Examples
- "Use more specific document references from AVAILABLE_DOCUMENTS"
- "Include user language parameter for better localization"
- "Break down complex objective into smaller, focused actions"
- "Verify document references before processing"
## 🚀 Response Format
Return ONLY the JSON object. Do not include any explanatory text."""
# Helper functions to extract content for placeholders
def extractUserPrompt(context) -> str:
    """Return the current task step's objective, or a fallback string."""
    task_step = getattr(context, 'task_step', None)
    if task_step:
        return task_step.objective or 'No request specified'
    return 'No request specified'
def extractAvailableDocuments(context) -> str:
    """Return the preformatted documents listing from the context, if any."""
    return getattr(context, 'available_documents', None) or "No documents available"
def extractWorkflowHistory(service, context) -> str:
    """Summarize previous workflow rounds for the prompt, or a first-round note.

    NOTE(review): this passes context.workflow (not a context object carrying
    a workflow_id attribute) into getPreviousRoundContext — confirm the
    workflow object exposes the attributes that helper expects.
    """
    firstRound = "No previous workflow rounds - this is the first round."
    workflow = getattr(context, 'workflow', None)
    if workflow:
        return getPreviousRoundContext(service, workflow) or firstRound
    return firstRound
def extractAvailableMethods(service) -> str:
    """Return discovered actions as a pretty-printed JSON object.

    Keys are compound action names ("method.action", e.g. "ai.process");
    values are the action descriptions.  Triggers discoverMethods() when the
    module-level `methods` cache is empty.  On error, returns an empty JSON
    object instead of raising.
    """
    try:
        if not methods:
            discoverMethods(service)
        available = {}
        for methodName, methodInfo in methods.items():
            # MethodAi -> ai, MethodDocument -> document, etc.
            prefix = methodName.replace('Method', '').lower()
            for actionName, actionInfo in methodInfo['actions'].items():
                description = actionInfo.get('description', f"Execute {actionName} action")
                available[f"{prefix}.{actionName}"] = description
        return json.dumps(available, indent=2, ensure_ascii=False)
    except Exception as e:
        logger.error(f"Error extracting available methods: {str(e)}")
        return json.dumps({}, indent=2, ensure_ascii=False)
def extractUserLanguage(service) -> str:
    """Return the user's language code from the service, defaulting to 'en'.

    Falls back to 'en' when the service or user is missing, when the user
    object lacks a language attribute, or when the language is unset
    (None/empty).  The empty-language fallback and getattr-based access make
    this consistent with getUserLanguage() in promptFactory, which the
    previous expression (`service.user.language if service and service.user
    else 'en'`) did not guarantee.
    """
    user = getattr(service, 'user', None) if service else None
    language = getattr(user, 'language', None) if user else None
    return language or 'en'
def extractReviewContent(context) -> str:
    """Extract review content from context with full document metadata.

    Sources are checked in order: context.action_results (per-result
    success/error plus document metadata, no content), context.observation,
    then context.step_result['observation'].  Preview snippets are redacted
    to character-count placeholders.

    Fixes: the observation was previously only shallow-copied, so redacting
    a preview's 'snippet' mutated the caller's observation dict in place;
    preview dicts are now copied before modification.  The duplicated
    observation-formatting logic is factored into
    _formatObservationForReview().
    """
    if getattr(context, 'action_results', None):
        result_summary = ""
        for i, result in enumerate(context.action_results):
            result_summary += f"\nRESULT {i+1}:\n"
            result_summary += f" Success: {result.success}\n"
            if result.error:
                result_summary += f" Error: {result.error}\n"
            if result.documents:
                result_summary += f" Documents: {len(result.documents)} document(s)\n"
                for doc in result.documents:
                    # Collect whatever metadata the document exposes (never content).
                    doc_metadata = {
                        "name": getattr(doc, 'documentName', 'Unknown'),
                        "mimeType": getattr(doc, 'mimeType', 'Unknown'),
                        "size": getattr(doc, 'size', 'Unknown'),
                        "created": getattr(doc, 'created', 'Unknown'),
                        "modified": getattr(doc, 'modified', 'Unknown'),
                        "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
                        "documentId": getattr(doc, 'documentId', 'Unknown'),
                        "reference": getattr(doc, 'reference', 'Unknown')
                    }
                    # Drop missing fields to keep the summary clean.
                    doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
                    result_summary += f" - {json.dumps(doc_metadata, indent=6, ensure_ascii=False)}\n"
            else:
                result_summary += f" Documents: None\n"
        return result_summary
    observation = getattr(context, 'observation', None)
    if observation:
        return _formatObservationForReview(observation)
    step_result = getattr(context, 'step_result', None)
    if step_result and 'observation' in step_result:
        return _formatObservationForReview(step_result['observation'])
    return "No review content available"


def _formatObservationForReview(observation) -> str:
    """Serialize an observation to JSON, redacting preview snippets.

    Dicts are pretty-printed with any preview 'snippet' replaced by a
    character-count placeholder; non-dict values are dumped compactly.
    Preview dicts are copied before modification so the caller's data is
    never mutated.
    """
    if not isinstance(observation, dict):
        return json.dumps(observation, ensure_ascii=False)
    obs_copy = dict(observation)
    previews = obs_copy.get('previews')
    if isinstance(previews, list):
        redacted = []
        for preview in previews:
            if isinstance(preview, dict) and 'snippet' in preview:
                preview = dict(preview)
                preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
            redacted.append(preview)
        obs_copy['previews'] = redacted
    return json.dumps(obs_copy, indent=2, ensure_ascii=False)

View file

@ -0,0 +1,208 @@
"""
Actionplan Mode Prompt Generation
Handles prompt templates and extraction functions for actionplan mode action handling.
"""
import json
import logging
from typing import Dict, Any
logger = logging.getLogger(__name__)
def createActionDefinitionPromptTemplate() -> str:
    """Return the actionplan-mode action-definition prompt template.

    The template asks the model to produce the next JSON action list for a
    task.  Placeholders (USER_PROMPT, AVAILABLE_DOCUMENTS, WORKFLOW_HISTORY,
    AVAILABLE_METHODS, AVAILABLE_CONNECTIONS, USER_LANGUAGE) are substituted
    by the caller before the prompt is sent.
    """
    return """# Action Definition
Generate the next action to advance toward completing the task objective.
## 📋 Context
### Task Objective
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### Workflow History
{{KEY:WORKFLOW_HISTORY}}
### Available Methods
{{KEY:AVAILABLE_METHODS}}
### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}
### User Language
{{KEY:USER_LANGUAGE}}
## ⚠️ RULES
### Action Names
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
- **DO NOT separate** method and action names - use the full compound name
### Parameter Guidelines
- **Use exact document references** from AVAILABLE_DOCUMENTS
- **Use exact connection references** from AVAILABLE_CONNECTIONS
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults
## 📊 Required JSON Structure
```json
{
"actions": [
{
"action": "method.action_name",
"parameters": {},
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
"description": "What this action accomplishes",
"userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
}
]
}
```
## ✅ Correct Example
```json
{
"actions": [
{
"action": "document.extract",
"parameters": {"documentList": ["docList:msg_123:results"]},
"resultLabel": "round1_task1_action1_extract_results",
"description": "Extract data from documents",
"userMessage": "Extracting data from documents"
}
]
}
```
## 🎯 Action Planning Guidelines
### Method Selection
- **Choose appropriate method** based on task requirements
- **Consider available resources** (documents, connections)
- **Match method capabilities** to task objectives
### Parameter Design
- **Use ACTION SIGNATURE** to understand required parameters
- **Convert objective** into appropriate parameter values
- **Include all required parameters** for the action
### Result Labeling
- **Use descriptive labels** that explain what the action produces
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
- **Make labels meaningful** for future reference
### User Messages
- **Write in user language** ({{KEY:USER_LANGUAGE}})
- **Explain what's happening** in user-friendly terms
- **Keep messages concise** but informative
## 🚀 Response Format
Return ONLY the JSON object."""
def createResultReviewPromptTemplate() -> str:
    """Create the result-review prompt template with placeholders.

    Returns:
        str: A static prompt instructing the model to validate task
        execution results and reply with a single JSON object containing
        ``status`` ("success|retry|failed"), ``reason``, ``improvements``,
        ``quality_score`` (1-10), ``met_criteria``, ``unmet_criteria``,
        ``confidence`` (0.0-1.0) and ``userMessage``.

    The ``{{KEY:USER_PROMPT}}`` and ``{{KEY:REVIEW_CONTENT}}`` tokens are
    placeholders — presumably substituted by the prompt-rendering caller
    before the text is sent to the model (confirm against that code).
    """
    # Static template; no runtime formatting happens in this function.
    return """# Result Review & Validation
Review task execution outcomes and determine success, retry needs, or failure.
## 📋 Context
### Task Objective
{{KEY:USER_PROMPT}}
### Execution Results
{{KEY:REVIEW_CONTENT}}
## 🔍 Validation Criteria
### Action Assessment
- **Review each action's success/failure status**
- **Check if required documents were produced**
- **Validate document quality and completeness**
- **Assess if success criteria were met**
- **Identify any missing or incomplete outputs**
### Decision Making
- **Determine if retry would help** or if task should be marked as failed
- **Consider business value** and user satisfaction
- **Evaluate technical execution** and results quality
## 📊 Required JSON Structure
```json
{
"status": "success|retry|failed",
"reason": "Detailed explanation of the validation decision",
"improvements": ["specific improvement 1", "specific improvement 2"],
"quality_score": 8,
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"],
"confidence": 0.85,
"userMessage": "User-friendly message explaining the validation result"
}
```
## 🎯 Validation Principles
### Assessment Approach
- **Be thorough but fair** in assessment
- **Focus on business value** and outcomes
- **Consider both technical execution** and business results
- **Provide specific, actionable** improvement suggestions
### Quality Scoring
- **Use quality scores** to track progress across retries
- **Scale 1-10**: 1 = Poor, 5 = Average, 10 = Excellent
- **Consider completeness, accuracy, and usefulness**
### Criteria Evaluation
- **Clearly identify** which success criteria were met vs. unmet
- **List specific criteria** that were achieved
- **Note missing requirements** that need attention
### Confidence Levels
- **Set appropriate confidence levels** based on evidence quality
- **Scale 0.0-1.0**: 0.0 = No confidence, 1.0 = Complete confidence
- **Consider data quality** and result reliability
## 📝 Status Definitions
### Success
- **All objectives met** - User got what they asked for
- **Quality standards met** - Results are complete and accurate
- **No retry needed** - Task is fully complete
### Retry
- **Partial success** - Some but not all objectives met
- **Improvement possible** - Retry could lead to better results
- **Technical issues** - Action failures that can be resolved
### Failed
- **No progress made** - Objectives not achieved
- **Technical limitations** - Cannot be resolved with retry
- **Resource constraints** - Missing required inputs
## 💡 Improvement Suggestions
### Actionable Improvements
- **Be specific** - Don't just say "improve quality"
- **Focus on process** - How to do better next time
- **Consider resources** - What additional inputs might help
- **Technical fixes** - Address specific technical issues
### Examples
- "Use more specific document references from AVAILABLE_DOCUMENTS"
- "Include user language parameter for better localization"
- "Break down complex objective into smaller, focused actions"
- "Verify document references before processing"
## 🚀 Response Format
Return ONLY the JSON object. Do not include any explanatory text."""

View file

@ -0,0 +1,108 @@
"""
React Mode Prompt Generation
Handles prompt templates for react mode action handling.
"""
def createReactPlanSelectionPromptTemplate() -> str:
    """Create the action-selection prompt template for React mode.

    Returns:
        str: A minimal prompt asking the model to pick exactly one action
        and answer with a bare JSON object of the form
        ``{"action": "method.action_name"}``.

    The ``{{KEY:...}}`` tokens (USER_PROMPT, AVAILABLE_DOCUMENTS,
    AVAILABLE_METHODS) are placeholders — presumably substituted by the
    prompt-rendering caller; confirm against that code. The doubled braces
    around the JSON example keep literal braces intact if the template is
    run through ``str.format``-style substitution.
    """
    return """Select one action to advance the task.
OBJECTIVE:
{{KEY:USER_PROMPT}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_METHODS:
{{KEY:AVAILABLE_METHODS}}
REPLY: Return only a JSON object with the selected action:
{{
"action": "method.action_name"
}}
RULES:
1. Use EXACT action names from AVAILABLE_METHODS
2. Return ONLY JSON - no other text
3. Do NOT use markdown code blocks
4. Do NOT add explanations
"""
def createReactParametersPromptTemplate() -> str:
    """Create the action-parameter prompt template for React mode.

    Returns:
        str: A prompt asking the model to generate parameters for the
        selected action, answering with a JSON object of the form
        ``{"parameters": {...}, "signature": [...]}``.

    The ``{{KEY:...}}`` tokens are placeholders — presumably substituted
    by the prompt-rendering caller; confirm against that code. The doubled
    braces in the JSON example keep literal braces intact if the template
    is run through ``str.format``-style substitution.

    Fix: the example JSON skeleton previously contained a trailing comma
    after ``"parameter": "value"`` — invalid JSON that invites the model
    to emit trailing commas, which strict JSON parsers reject. The example
    is now valid JSON.
    """
    return """Generate parameters for this action.
ACTION_OBJECTIVE (the objective for this action to fulfill):
{{KEY:ACTION_OBJECTIVE}}
ACTION_SIGNATURE (the signature of the action to generate parameters for):
{{KEY:ACTION_SIGNATURE}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_CONNECTIONS:
{{KEY:AVAILABLE_CONNECTIONS}}
USER_REQUEST (final user prompt to deliver):
{{KEY:USER_PROMPT}}
USER_LANGUAGE:
{{KEY:USER_LANGUAGE}}
PREVIOUS_ACTION_RESULTS:
{{KEY:PREVIOUS_ACTION_RESULTS}}
LEARNINGS_AND_IMPROVEMENTS:
{{KEY:LEARNINGS_AND_IMPROVEMENTS}}
LATEST_REFINEMENT_FEEDBACK:
{{KEY:LATEST_REFINEMENT_FEEDBACK}}
SELECTED_ACTION:
{{KEY:SELECTED_ACTION}}
REPLY: Return only a JSON object with the parameters according to the ACTION_SIGNATURE without any comments in the structure below:
{{
"parameters": {{
"parameter": "value"
}},
"signature": [List of all signatures, you see in the ACTION_SIGNATURE]
}}
RULES:
1. Use ONLY parameter names from ACTION_SIGNATURE
2. Use exact connection references from AVAILABLE_CONNECTIONS for connectionReference parameters
3. Use exact document references from AVAILABLE_DOCUMENTS for documentList parameters
4. Learn from PREVIOUS_ACTION_RESULTS and LEARNINGS_AND_IMPROVEMENTS to avoid repeating mistakes
5. Consider LATEST_REFINEMENT_FEEDBACK when generating parameters
6. Use the ACTION_OBJECTIVE to understand the specific goal for this action
7. Generate parameters that align with the USER_LANGUAGE when applicable
8. Return ONLY JSON - no other text
9. Do NOT use markdown code blocks
10. Do NOT add explanations
"""
def createReactRefinementPromptTemplate() -> str:
    """Create the refinement (continue/stop) prompt template for React mode.

    Returns:
        str: A prompt asking the model to inspect the latest observation
        and answer with ``{"decision": "continue|stop", "reason": "..."}``.

    The ``{{KEY:USER_PROMPT}}`` and ``{{KEY:REVIEW_CONTENT}}`` tokens are
    placeholders — presumably substituted by the prompt-rendering caller;
    confirm against that code.
    """
    return """Decide the next step based on the observation.
OBJECTIVE:
{{KEY:USER_PROMPT}}
OBSERVATION:
{{KEY:REVIEW_CONTENT}}
REPLY: Return only a JSON object with your decision:
{{
"decision": "continue|stop",
"reason": "brief explanation"
}}
RULES:
1. Use "continue" if objective NOT fulfilled
2. Use "stop" if objective fulfilled
3. Return ONLY JSON - no other text
4. Do NOT use markdown code blocks
5. Do NOT add explanations
"""

View file

@ -0,0 +1,107 @@
"""
Task Planning Prompt Generation
Handles prompt templates and extraction functions for task planning phase.
"""
import json
import logging
from typing import Dict, Any

# NOTE(review): json, Dict and Any appear unused in the visible code —
# presumably needed by the extraction functions mentioned in the module
# docstring; confirm before removing.
# Module-level logger named after this module so log output is filterable.
logger = logging.getLogger(__name__)
def createTaskPlanningPromptTemplate() -> str:
    """Create the task-planning prompt template with placeholders.

    Returns:
        str: A static prompt instructing the model to break the user
        request into strategic tasks and answer with a JSON object
        containing ``overview``, ``languageUserDetected``, ``userMessage``
        and a ``tasks`` list (id, objective, dependencies,
        success_criteria, estimated_complexity, userMessage).

    The ``{{KEY:...}}`` tokens (USER_PROMPT, AVAILABLE_DOCUMENTS,
    WORKFLOW_HISTORY) are placeholders — presumably substituted by the
    prompt-rendering caller before the text is sent to the model.
    """
    # Static template; no runtime formatting happens in this function.
    return """# Task Planning
Break down user requests into logical, executable task steps.
## 📋 Context
### User Request
{{KEY:USER_PROMPT}}
### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}
### Previous Workflow Rounds
{{KEY:WORKFLOW_HISTORY}}
## 📝 Task Planning Rules
### Strategic Task Grouping
- **GROUP RELATED ACTIONS** - Combine all actions for the same business topic into ONE task
- **ONE TOPIC PER TASK** - Each task should handle one complete business objective
- **HIGH-LEVEL FOCUS** - Plan strategic outcomes, not implementation steps
- **AVOID MICRO-TASKS** - Don't create separate tasks for each small action
### Task Grouping Examples
- **Research + Analysis + Report** ONE task: "Web research report"
- **Data Collection + Processing + Visualization** ONE task: "Collect and present data"
- **Different topics** (email + flowers) SEPARATE tasks: "Send formal email..." + "Order flowers from Fleurop for delivery to 123 Main St, include card message"
### Retry Handling
- **If retry request**: Analyze previous rounds to understand what failed
- **Learn from mistakes**: Improve the plan based on previous failures
## 📊 Required JSON Structure
```json
{
"overview": "Brief description of the overall plan",
"languageUserDetected": "en",
"userMessage": "User-friendly message explaining the task plan",
"tasks": [
{
"id": "task_1",
"objective": "Clear business objective focusing on what to deliver",
"dependencies": ["task_0"],
"success_criteria": ["measurable criteria 1", "measurable criteria 2"],
"estimated_complexity": "low|medium|high",
"userMessage": "What this task will accomplish"
}
]
}
```
## 🎯 Task Structure Guidelines
### Task ID Format
- Use sequential numbering: `task_1`, `task_2`, `task_3`
- Keep IDs simple and clear
### Objective Writing
- **Be VERY SPECIFIC** - Include exact details needed for action planning
- **Include all requirements** - recipient, attachments, format, recipients, etc.
- **State the complete deliverable** - What exactly will be produced
- **Include context and constraints** - When, where, how, with what
- **Make it actionable** - Clear enough to plan specific actions
### Specific Objective Examples
- **Good**: "Send formal email to ceo and board of directors with annual report as attachment"
- **Bad**: "Handle email communication"
- **Good**: "Order flowers from Fleurop for delivery to 123 Main St, include card message 'Happy Birthday', deliver on March 15th"
- **Bad**: "Order flowers"
### Action Planning Requirements
- **Include all necessary details** - The objective must contain everything needed to plan actions
- **Specify recipients and destinations** - Who should receive what
- **Include file names and formats** - What documents to use/create
- **State timing and deadlines** - When things need to be done
- **Include context and constraints** - Any special requirements or limitations
### Success Criteria
- **Make them measurable** - specific, quantifiable outcomes
- **Focus on deliverables** - what the user will receive
- **Keep criteria realistic** - achievable within the task scope
- **Include all related actions** - success means completing the entire business objective
- **Be specific about requirements** - Include exact details like recipients, formats, deadlines
- **State clear completion criteria** - How to know the task is fully done
### Complexity Estimation
- **Low**: Simple, single-action tasks (1-2 actions)
- **Medium**: Multi-action tasks for one topic (3-5 actions)
- **High**: Complex strategic tasks (6+ actions)
## 🚀 Response Format
Return ONLY the JSON object."""

View file

@ -1,87 +0,0 @@
"""
React-specific prompt templates for dynamic AI calls.
These templates are tailored for the React mode's iterative process.
"""
def createReactPlanSelectionPromptTemplate() -> str:
    """Create the action-selection prompt template for React mode.

    Returns:
        str: A minimal prompt asking the model to pick exactly one action
        and answer with a bare JSON object of the form
        ``{"action": "method.action_name"}``.

    The ``{{KEY:...}}`` tokens are placeholders — presumably substituted
    by the prompt-rendering caller; confirm against that code.
    """
    return """Select one action to advance the task.
OBJECTIVE:
{{KEY:USER_PROMPT}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_METHODS:
{{KEY:AVAILABLE_METHODS}}
REPLY: Return only a JSON object with the selected action:
{{
"action": "method.action_name"
}}
RULES:
1. Use EXACT action names from AVAILABLE_METHODS
2. Return ONLY JSON - no other text
3. Do NOT use markdown code blocks
4. Do NOT add explanations
"""
def createReactParametersPromptTemplate() -> str:
    """Create the ultra-simple action-parameter prompt template for React mode.

    Returns:
        str: A prompt asking the model to generate parameters for the
        selected action, answering with a JSON object of the form
        ``{"parameters": {...}, "signature": [...]}``.

    The ``{{KEY:...}}`` tokens are placeholders — presumably substituted
    by the prompt-rendering caller; confirm against that code.

    Fix: the example JSON skeleton previously contained a trailing comma
    after ``"parameter": "value"`` — invalid JSON that invites the model
    to emit trailing commas, which strict JSON parsers reject. The example
    is now valid JSON.
    """
    return """Generate parameters for this action.
ACTION_SIGNATURE:
{{KEY:ACTION_SIGNATURE}}
AVAILABLE_DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE_CONNECTIONS:
{{KEY:AVAILABLE_CONNECTIONS}}
USER_REQUEST:
{{KEY:USER_PROMPT}}
REPLY: Return only a JSON object with the parameters according to the ACTION_SIGNATURE without any comments in the structure below:
{{
"parameters": {{
"parameter": "value"
}},
"signature": [List of all signatures, you see in the ACTION_SIGNATURE]
}}
RULES:
1. Use ONLY parameter names from ACTION_SIGNATURE
2. Use exact connection references from AVAILABLE_CONNECTIONS for connectionReference parameters
3. Use exact document references from AVAILABLE_DOCUMENTS for documentList parameters
4. Return ONLY JSON - no other text
5. Do NOT use markdown code blocks
6. Do NOT add explanations
"""
def createReactRefinementPromptTemplate() -> str:
    """Create the refinement (continue/stop) prompt template for React mode.

    Returns:
        str: A prompt asking the model to inspect the latest observation
        and answer with ``{"decision": "continue|stop", "reason": "..."}``.

    The ``{{KEY:USER_PROMPT}}`` and ``{{KEY:REVIEW_CONTENT}}`` tokens are
    placeholders — presumably substituted by the prompt-rendering caller;
    confirm against that code.
    """
    return """Decide the next step based on the observation.
OBJECTIVE:
{{KEY:USER_PROMPT}}
OBSERVATION:
{{KEY:REVIEW_CONTENT}}
REPLY: Return only a JSON object with your decision:
{{
"decision": "continue|stop",
"reason": "brief explanation"
}}
RULES:
1. Use "continue" if objective NOT fulfilled
2. Use "stop" if objective fulfilled
3. Return ONLY JSON - no other text
4. Do NOT use markdown code blocks
5. Do NOT add explanations
"""

View file

@ -5,9 +5,9 @@ import logging
from typing import Dict, Any, Optional, List
from modules.datamodels.datamodelWorkflow import TaskStep, TaskContext, TaskPlan, TaskResult, ReviewResult
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.modes.baseMode import BaseMode
from modules.workflows.processing.modes.actionplanMode import ActionplanMode
from modules.workflows.processing.modes.reactMode import ReactMode
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.modes.modeActionplan import ActionplanMode
from modules.workflows.processing.modes.modeReact import ReactMode
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,19 @@
{
"id": "msg_4fce3b47-1595-4190-a09a-7b8c2483d9dd",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "Sende eine formelle E-Mail an peter.muster@domain.com von meinem valueon account aus, um meinen Termin von 10 Uhr auf Freitag zu scheiben. lege diese datei im mail als anhang bei und erfasse eine zusammenfasung im mail.",
"role": "user",
"status": "first",
"sequenceNr": 7,
"publishedAt": 1759651201.5949264,
"roundNumber": 2,
"taskNumber": 0,
"actionNumber": 0,
"documentsLabel": "round2_task0_action0_context",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1 @@
Sende eine formelle E-Mail an peter.muster@domain.com von meinem valueon account aus, um meinen Termin von 10 Uhr auf Freitag zu scheiben. lege diese datei im mail als anhang bei und erfasse eine zusammenfasung im mail.

View file

@ -0,0 +1,19 @@
{
"id": "msg_b85c3e84-c119-4634-ad19-11a735e1039d",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "📋 **Task Plan**\n\nI will help you send a formal email to reschedule your appointment, including the specified file and a summary.\n\n💬 I will compose and send a formal email to reschedule your appointment, ensuring all required elements are included.\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 8,
"publishedAt": 1759651206.7063708,
"roundNumber": 2,
"taskNumber": 1,
"actionNumber": 0,
"documentsLabel": "task_plan",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,6 @@
📋 **Task Plan**
I will help you send a formal email to reschedule your appointment, including the specified file and a summary.
💬 I will compose and send a formal email to reschedule your appointment, ensuring all required elements are included.

View file

@ -0,0 +1,19 @@
{
"id": "msg_cb9e3372-52b5-4254-98e6-7552efb2b248",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "🚀 **Task 1/1**\n\n💬 I will compose and send a formal email to reschedule your appointment, ensuring all required elements are included.",
"role": "assistant",
"status": "step",
"sequenceNr": 9,
"publishedAt": 1759651207.017333,
"roundNumber": 2,
"taskNumber": 1,
"actionNumber": 0,
"documentsLabel": "task_1_start",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,3 @@
🚀 **Task 1/1**
💬 I will compose and send a formal email to reschedule your appointment, ensuring all required elements are included.

View file

@ -0,0 +1,19 @@
{
"id": "msg_dafc2f81-528f-4c61-991e-53fbb863a9a8",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "**Action 1/1 (outlook.composeAndSendEmailWithContext)**\n\n✅ Compose and send formal email from valueon account to peter.muster@domain.com to reschedule 10am appointment to Friday, including file attachment and appointment summary\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 10,
"publishedAt": 1759651220.387675,
"roundNumber": 2,
"taskNumber": 1,
"actionNumber": 1,
"documentsLabel": "round2_task1_action1_results",
"actionId": "action_4a3eb40f-a97d-4043-94c3-4fedfc5b1c8d",
"actionMethod": "outlook",
"actionName": "composeAndSendEmailWithContext",
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
**Action 1/1 (outlook.composeAndSendEmailWithContext)**
✅ Compose and send formal email from valueon account to peter.muster@domain.com to reschedule 10am appointment to Friday, including file attachment and appointment summary

View file

@ -0,0 +1,12 @@
{
"id": "94fc49c9-55e5-4b03-a437-e79c26483651",
"messageId": "msg_dafc2f81-528f-4c61-991e-53fbb863a9a8",
"fileId": "a0196528-6ba3-4bc9-abef-7ae25aad0c76",
"fileName": "ai_generated_email_draft_20251005-080020.json",
"fileSize": 1173,
"mimeType": "application/json",
"roundNumber": 2,
"taskNumber": 1,
"actionNumber": 1,
"actionId": "action_4a3eb40f-a97d-4043-94c3-4fedfc5b1c8d"
}

View file

@ -0,0 +1,19 @@
{
"id": "msg_998654a7-b4b3-444a-9d25-ecabd0117735",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "Workflow completed.\n\nProcessed 2 user inputs and generated 9 responses.\nWorkflow status: running",
"role": "assistant",
"status": "last",
"sequenceNr": 12,
"publishedAt": 1759651221.848871,
"roundNumber": 2,
"taskNumber": 0,
"actionNumber": 0,
"documentsLabel": "workflow_feedback",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
Workflow completed.
Processed 2 user inputs and generated 9 responses.
Workflow status: running

View file

@ -0,0 +1,19 @@
{
"id": "msg_402e3653-1500-441f-adf0-d4ea90980d4e",
"workflowId": "8630c862-d9f3-4332-9d6c-6664a39edd73",
"parentMessageId": null,
"message": "🎯 **Task 1/1**\n\n✅ Email draft successfully created with attachment and summary as requested\n📊 Score 8/10",
"role": "assistant",
"status": "step",
"sequenceNr": 11,
"publishedAt": 1759651221.6639369,
"roundNumber": 2,
"taskNumber": 1,
"actionNumber": 0,
"documentsLabel": "task_1_completion",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
🎯 **Task 1/1**
✅ Email draft successfully created with attachment and summary as requested
📊 Score 8/10