# modeDynamic.py
# Dynamic mode implementation for workflows
import json
import logging
import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from modules.datamodels.datamodelChat import (
TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus,
ActionResult, Observation, ObservationPreview, ReviewResult
)
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.modes.modeBase import BaseMode
from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
from modules.shared.timeUtils import parseTimestamp
from modules.workflows.processing.shared.executionState import TaskExecutionState, shouldContinue
from modules.workflows.processing.shared.promptGenerationActionsDynamic import (
generateDynamicPlanSelectionPrompt,
generateDynamicParametersPrompt,
generateDynamicRefinementPrompt
)
from modules.workflows.processing.shared.placeholderFactory import extractReviewContent
from modules.workflows.processing.adaptive import ContentValidator, LearningEngine, ProgressTracker
from modules.workflows.processing.adaptive.adaptiveLearningEngine import AdaptiveLearningEngine
logger = logging.getLogger(__name__)
class DynamicMode(BaseMode):
"""Dynamic mode implementation - iterative plan-act-observe-refine loop"""
def __init__(self, services):
super().__init__(services)
# Initialize adaptive components
self.learningEngine = LearningEngine()
self.adaptiveLearningEngine = AdaptiveLearningEngine() # New enhanced learning engine
self.contentValidator = ContentValidator(services, self.adaptiveLearningEngine)
self.progressTracker = ProgressTracker()
self.currentIntent = None
# Placeholder service no longer used; prompts are generated directly
    async def generateActionItems(self, taskStep: TaskStep, workflow: ChatWorkflow,
                                  previousResults: Optional[List] = None,
                                  enhancedContext: Optional[TaskContext] = None) -> List[ActionItem]:
"""Dynamic mode doesn't use batch action generation - actions are generated iteratively"""
# Dynamic mode generates actions one at a time in the execution loop
return []
async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
"""Execute task using Dynamic mode - iterative plan-act-observe-refine loop"""
# Get task index from workflow state
taskIndex = workflow.getTaskIndex()
logger.info(f"=== STARTING TASK {taskIndex}: {taskStep.objective} ===")
# Use workflow-level intent from planning phase (stored in workflow object)
# This avoids redundant intent analysis - intent was already analyzed during userintention phase
if hasattr(workflow, '_workflowIntent') and workflow._workflowIntent:
self.workflowIntent = workflow._workflowIntent
logger.info(f"Using workflow intent from userintention phase")
else:
# Fallback: use empty dict if not available (shouldn't happen in normal flow)
self.workflowIntent = {}
logger.warning(f"Workflow intent not found in workflow object, using empty dict")
# Task-level intent: Use task-specific fields from TaskStep if available, otherwise inherit from workflow
# Task can override workflow intent (e.g., workflow wants PDF, task needs CSV)
# IMPORTANT: taskIntent is used for task-level tracking, not workflow-level
self.taskIntent = {}
# Add task objective - this is what we track progress against
self.taskIntent['taskObjective'] = taskStep.objective
if taskStep.dataType:
self.taskIntent['dataType'] = taskStep.dataType
elif self.workflowIntent.get('dataType'):
self.taskIntent['dataType'] = self.workflowIntent['dataType']
if taskStep.expectedFormats:
self.taskIntent['expectedFormats'] = taskStep.expectedFormats
elif self.workflowIntent.get('expectedFormats'):
self.taskIntent['expectedFormats'] = self.workflowIntent['expectedFormats']
if hasattr(taskStep, 'qualityRequirements') and taskStep.qualityRequirements:
self.taskIntent['qualityRequirements'] = taskStep.qualityRequirements
elif self.workflowIntent.get('qualityRequirements'):
self.taskIntent['qualityRequirements'] = self.workflowIntent['qualityRequirements']
# Store taskIntent in workflow object so it's accessible from services
workflow._taskIntent = self.taskIntent
logger.info(f"Task intent (task-level): {self.taskIntent}")
logger.info(f"Task objective: {taskStep.objective}")
logger.info(f"Task format info: dataType={taskStep.dataType}, expectedFormats={taskStep.expectedFormats}")
# NEW: Reset progress tracking for new task
self.progressTracker.reset()
# Update workflow object before executing task
self._updateWorkflowBeforeExecutingTask(taskIndex)
# Create task start message (totalTasks not needed - removed from signature)
await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, None)
state = TaskExecutionState(taskStep)
# Dynamic mode uses max_steps instead of max_retries
state.max_steps = max(1, int(getattr(workflow, 'maxSteps', 5)))
logger.info(f"Using Dynamic mode execution with max_steps: {state.max_steps}")
step = 1
decision = None
while step <= state.max_steps:
checkWorkflowStopped(self.services)
# Update workflow[currentAction] for UI
self._updateWorkflowBeforeExecutingAction(step)
try:
selection = await self._planSelect(context)
logger.info(f"Dynamic step {step}: Selected action: {selection}")
# Create user-friendly message BEFORE action execution
# Action intention message is now handled by the standard message creator in _actExecute
result = await self._actExecute(context, selection, taskStep, workflow, step)
observation = self._observeBuild(result)
# Note: resultLabel is already set correctly in _observeBuild from actionResult.resultLabel
# Content validation (against original cleaned user prompt / workflow intent)
if getattr(self, 'workflowIntent', None) and result.documents:
# Pass ALL documents to validator - validator decides what to validate (generic approach)
# Pass taskStep so validator can use task.objective and format fields
# Pass action name so validator knows which action created the documents
actionName = selection.get('action', 'unknown')
validationResult = await self.contentValidator.validateContent(result.documents, self.workflowIntent, taskStep, actionName)
observation.contentValidation = validationResult
quality_score = validationResult.get('qualityScore', 0.0)
if quality_score is None:
quality_score = 0.0
logger.info(f"Content validation: {validationResult['overallSuccess']} (quality: {quality_score:.2f})")
                    # NEW: Record validation result for adaptive learning
                    actionContext = {
                        'actionName': actionName,
                        'workflowId': context.workflowId
                    }
self.adaptiveLearningEngine.recordValidationResult(
validationResult,
actionContext,
context.workflowId,
step
)
# NEW: Learn from feedback - use taskIntent (task-level), not workflowIntent
feedback = self._collectFeedback(result, validationResult, self.taskIntent)
self.learningEngine.learnFromFeedback(feedback, context, self.taskIntent)
# NEW: Update progress - use taskIntent (task-level), not workflowIntent
self.progressTracker.updateOperation(result, validationResult, self.taskIntent)
decision = await self._refineDecide(context, observation)
# Store refinement decision in context for next iteration
if not hasattr(context, 'previousReviewResult') or context.previousReviewResult is None:
context.previousReviewResult = []
if decision: # Only append if decision is not None
context.previousReviewResult.append(decision)
# Store next action guidance from decision for use in next iteration
if decision and decision.status == "continue" and decision.nextAction:
# Set nextActionGuidance directly (now defined in TaskContext model)
context.nextActionGuidance = {
"action": decision.nextAction,
"parameters": decision.nextActionParameters or {},
"objective": decision.nextActionObjective or decision.reason or ""
}
logger.info(f"Stored next action guidance: {decision.nextAction} with parameters {decision.nextActionParameters}")
# Update context with learnings from this step
if decision and decision.reason:
if not hasattr(context, 'improvements'):
context.improvements = []
context.improvements.append(f"Step {step}: {decision.reason}")
# Create user-friendly message AFTER action execution
# Action completion message is now handled by the standard message creator in _actExecute
except Exception as e:
logger.error(f"Dynamic step {step} error: {e}")
break
# NEW: Use adaptive stopping logic
progressState = self.progressTracker.getCurrentProgress()
continueByProgress = self.progressTracker.shouldContinue(progressState, observation.contentValidation if observation.contentValidation else {})
# Use Observation Pydantic model directly (decision is ReviewResult model)
continueByReview = shouldContinue(observation, decision, step, state.max_steps)
if not continueByProgress or not continueByReview:
logger.info(f"Stopping at step {step}: progress={continueByProgress}, review={continueByReview}")
break
step += 1
        # Summarize task result for dynamic mode (status and success default to completed)
        status = TaskStatus.COMPLETED
        success = True
        # Get feedback from last decision if available
        lastDecision = context.previousReviewResult[-1] if hasattr(context, 'previousReviewResult') and context.previousReviewResult else None
        feedback = lastDecision.reason if lastDecision and isinstance(lastDecision, ReviewResult) else 'Completed'
# Create proper ReviewResult for completion message
completionReviewResult = ReviewResult(
status='success',
reason=feedback,
qualityScore=lastDecision.qualityScore if lastDecision and isinstance(lastDecision, ReviewResult) else 8.0,
metCriteria=[],
improvements=[]
)
# Create task completion message (totalTasks not needed - removed from signature)
await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, None, completionReviewResult)
return TaskResult(
taskId=taskStep.id,
status=status,
success=success,
feedback=feedback,
error=None if success else feedback
)
async def _planSelect(self, context: TaskContext) -> Dict[str, Any]:
"""Plan: select exactly one action. Returns {"action": {method, name}}"""
# Check if we have concrete next action guidance from previous refinement decision
# Check for nextActionGuidance (now defined in TaskContext model)
if context.nextActionGuidance:
guidance = context.nextActionGuidance
actionName = guidance.get("action")
parameters = guidance.get("parameters", {})
objective = guidance.get("objective", "")
if actionName:
logger.info(f"Using guided next action: {actionName} (from refinement decision)")
# Create selection dict from guidance
selection = {
"action": actionName,
"actionObjective": objective,
"parameters": parameters
}
# Clear guidance after use (one-time use)
context.nextActionGuidance = None
return selection
# Normal planning: use AI to select action
bundle = generateDynamicPlanSelectionPrompt(self.services, context, self.adaptiveLearningEngine)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Centralized AI call for plan selection (uses static planning parameters)
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
# Create options for documentation/consistency (currently not passed to callAiPlanning API)
options = AiCallOptions(
operationType=OperationTypeEnum.PLAN,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
response = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
debugType="dynamic"
)
# Parse response using structured parsing with ActionDefinition model
from modules.shared.jsonUtils import parseJsonWithModel, tryParseJson
from modules.datamodels.datamodelWorkflow import ActionDefinition
# CRITICAL: Extract requiredInputDocuments from raw JSON BEFORE parsing as ActionDefinition
# ActionDefinition model doesn't have requiredInputDocuments field, so it gets lost during parsing
# tryParseJson already handles markdown code blocks via extractJsonString internally
rawJson, parseError, _ = tryParseJson(response)
requiredInputDocuments = None
requiredConnection = None
if parseError:
logger.warning(f"Error parsing raw JSON for requiredInputDocuments extraction: {parseError}")
if isinstance(rawJson, dict):
requiredInputDocuments = rawJson.get('requiredInputDocuments')
requiredConnection = rawJson.get('requiredConnection')
if requiredInputDocuments:
logger.info(f"Extracted requiredInputDocuments from raw JSON: {requiredInputDocuments}")
try:
# Parse response string as ActionDefinition
actionDef = parseJsonWithModel(response, ActionDefinition)
# Convert to dict for compatibility with existing code
selection = actionDef.model_dump()
except ValueError as e:
logger.error(f"Failed to parse ActionDefinition from response: {e}")
raise ValueError(f"Invalid action selection response: {e}")
if 'action' not in selection or not isinstance(selection['action'], str):
raise ValueError("Selection missing 'action' as string")
# Validate document references - prevent AI from inventing Message IDs
# Convert string references to typed DocumentReferenceList (from raw JSON, not from parsed model)
if requiredInputDocuments:
stringRefs = requiredInputDocuments
try:
if isinstance(stringRefs, list):
# Validate string references first
self._validateDocumentReferences(stringRefs, context)
# Convert to typed DocumentReferenceList
from modules.datamodels.datamodelDocref import DocumentReferenceList
docList = DocumentReferenceList.from_string_list(stringRefs)
selection['documentList'] = docList
logger.info(f"Converted requiredInputDocuments to documentList: {len(docList.references)} references")
elif stringRefs:
# Single string reference
self._validateDocumentReferences([stringRefs], context)
from modules.datamodels.datamodelDocref import DocumentReferenceList
docList = DocumentReferenceList.from_string_list([stringRefs])
selection['documentList'] = docList
logger.info(f"Converted requiredInputDocuments to documentList: {len(docList.references)} references")
except Exception as e:
logger.error(f"Error converting requiredInputDocuments to documentList: {e}")
raise # Re-raise to fail fast if document conversion fails
else:
# No documents required - this is normal for actions that don't need input documents
logger.debug(f"No requiredInputDocuments found in raw JSON response (normal for actions without document requirements)")
# Convert connection reference if present (from raw JSON, not from parsed model)
if requiredConnection:
selection['connectionReference'] = requiredConnection
        # Enforce spec: Stage 1 must NOT include 'parameters' - remove to avoid accidental carryover
        selection.pop('parameters', None)
return selection
def _validateDocumentReferences(self, document_refs: List[str], context: TaskContext) -> None:
"""Validate that document references exist in the current workflow"""
if not document_refs:
return
# Get available documents from the current workflow
try:
available_docs = self.services.chat.getAvailableDocuments(self.services.workflow)
if not available_docs or available_docs == "No documents available":
logger.warning("No documents available for validation")
return
# Extract all valid references from available documents
valid_refs = []
for line in available_docs.split('\n'):
if 'docList:' in line or 'docItem:' in line:
# Extract reference from line like " - docList:msg_xxx:label" or " - docItem:xxx:filename with spaces"
ref_match = re.search(r'(docList:[^\s]+|docItem:[^\s]+(?:\s+[^\s]+)*)', line)
if ref_match:
valid_refs.append(ref_match.group(1))
# Prefer non-empty documents: the available_docs index is already filtered to skip empty docs
preferred_refs = set(valid_refs)
# Check if all provided references are valid and prefer non-empty
for ref in document_refs:
if ref in preferred_refs:
# Exact match - valid
continue
# For docItem references, check if documentId matches (filename is optional)
if ref.startswith('docItem:'):
# Extract documentId from provided reference
provided_parts = ref[8:].split(':', 1) # Remove "docItem:" prefix
provided_doc_id = provided_parts[0] if provided_parts else None
if provided_doc_id:
# Check if any available reference has the same documentId
found_match = False
for valid_ref in valid_refs:
if valid_ref.startswith('docItem:'):
valid_parts = valid_ref[8:].split(':', 1)
valid_doc_id = valid_parts[0] if valid_parts else None
if valid_doc_id == provided_doc_id:
found_match = True
break
if found_match:
# DocumentId matches - valid (filename is optional)
continue
# No match found
logger.error(f"Invalid or empty document reference: {ref}")
logger.error(f"Available references: {valid_refs}")
raise ValueError(f"Document reference '{ref}' not found or refers to empty document. Use only non-empty references from AVAILABLE_DOCUMENTS_INDEX.")
        except ValueError:
            # Re-raise validation errors without re-wrapping them
            raise
        except Exception as e:
            logger.error(f"Error validating document references: {str(e)}")
            raise ValueError(f"Failed to validate document references: {str(e)}")
async def _actExecute(self, context: TaskContext, selection: Dict[str, Any], taskStep: TaskStep,
workflow: ChatWorkflow, stepIndex: int) -> ActionResult:
"""Act: request minimal parameters then execute selected action"""
compoundActionName = selection.get('action', '')
actionObjective = selection.get('actionObjective', '')
# Action-level intent: Extract from dynamic plan selection prompt response
# Action intent analysis is now integrated into generateDynamicPlanSelectionPrompt
# Extract intent fields from selection response
actionIntent = {}
if actionObjective:
# Extract intent fields from selection response (if provided by AI)
if 'dataType' in selection:
actionIntent['dataType'] = selection.get('dataType')
if 'expectedFormats' in selection:
actionIntent['expectedFormats'] = selection.get('expectedFormats')
if 'qualityRequirements' in selection:
actionIntent['qualityRequirements'] = selection.get('qualityRequirements')
if 'successCriteria' in selection:
actionIntent['successCriteria'] = selection.get('successCriteria')
            # If no intent fields in selection, inherit from task intent
            if not actionIntent:
                taskIntent = getattr(workflow, '_taskIntent', None)
                if taskIntent:
                    actionIntent = taskIntent.copy()
                    logger.info("Using task intent as action intent (no intent fields in selection)")
            else:
                logger.info(f"Action intent extracted from selection: {actionIntent}")
        else:
            # No actionObjective - fall back to task intent
            actionIntent = getattr(workflow, '_taskIntent', None) or {}
            logger.warning("No actionObjective provided, using task intent as fallback")
        # Store actionIntent in workflow object so it's accessible from services (all paths)
        workflow._actionIntent = actionIntent
# Parse compound action name (e.g., "ai.webResearch" -> method="ai", action="webResearch")
if '.' not in compoundActionName:
raise ValueError(f"Invalid compound action name: {compoundActionName}. Expected format: method.action")
methodName, actionName = compoundActionName.split('.', 1)
# Always request parameters in Stage 2 (spec: Stage 1 must not provide them)
logger.info("Requesting parameters in Stage 2 based on Stage 1 outputs")
# Update context from Stage 1 selection (replaces SimpleNamespace workaround)
# Convert dict selection to ActionDefinition if needed
from modules.datamodels.datamodelWorkflow import ActionDefinition
if isinstance(selection, dict):
# Create ActionDefinition from dict for updateFromSelection
actionDef = ActionDefinition(
action=selection.get('action', ''),
actionObjective=selection.get('actionObjective', ''),
parametersContext=selection.get('parametersContext', ''),
learnings=selection.get('learnings', [])
)
context.updateFromSelection(actionDef)
elif isinstance(selection, ActionDefinition):
context.updateFromSelection(selection)
else:
# Fallback: create empty ActionDefinition
context.updateFromSelection(ActionDefinition(action='', actionObjective=''))
# Build and send the Stage 2 parameters prompt (always)
# Use context directly (no SimpleNamespace workaround)
bundle = generateDynamicParametersPrompt(self.services, context, compoundActionName, self.adaptiveLearningEngine)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Centralized AI call for parameter suggestion (uses static planning parameters)
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
# Create options for documentation/consistency (currently not passed to callAiPlanning API)
options = AiCallOptions(
operationType=OperationTypeEnum.PLAN,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
paramsResp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
debugType="paramplan"
)
# Parse JSON response - Stage 2 only returns parameters structure, not full ActionDefinition
from modules.shared.jsonUtils import tryParseJson
jsonObj, parseError, cleanedStr = tryParseJson(paramsResp)
if parseError or not isinstance(jsonObj, dict):
logger.error(f"Failed to parse JSON from parameters response: {parseError}")
logger.error(f"Response was: {paramsResp[:500]}...")
raise ValueError(f"AI parameters response invalid JSON: {parseError}")
# Extract parameters from response (Stage 2 only provides parameters, not full ActionDefinition)
parameters = jsonObj.get('parameters', {})
if not isinstance(parameters, dict):
raise ValueError("AI parameters response missing 'parameters' object")
# Extract userMessage from Stage 2 response if available
# Stage 2 can override Stage 1 userMessage with more specific message
userMessage = jsonObj.get('userMessage')
if userMessage:
selection['userMessage'] = userMessage
# Merge Stage 1 resource selections into Stage 2 parameters (only if action expects them)
try:
# Use typed documentList from selection (required)
# Check both top-level selection and selection['parameters'] (for guided actions)
from modules.datamodels.datamodelDocref import DocumentReferenceList
docList = selection.get('documentList')
# If not found at top level, check in selection['parameters'] (guided action case)
if not docList and isinstance(selection, dict) and 'parameters' in selection:
docListParam = selection['parameters'].get('documentList')
if docListParam:
# Convert string list back to DocumentReferenceList if needed
if isinstance(docListParam, list) and all(isinstance(x, str) for x in docListParam):
docList = DocumentReferenceList.from_string_list(docListParam)
elif isinstance(docListParam, DocumentReferenceList):
docList = docListParam
if docList and isinstance(docList, DocumentReferenceList):
# Check if action actually has documentList parameter by checking action definition
methodName, actionName = compoundActionName.split('.', 1)
from modules.workflows.processing.shared.methodDiscovery import methods as _methods
if methodName in _methods:
methodInstance = _methods[methodName]['instance']
if actionName in methodInstance.actions:
action_info = methodInstance.actions[actionName]
docstring = action_info.get('description', '')
# Extract parameter names from docstring to check if documentList exists
paramDescriptions, _ = methodInstance._extractParameterDetails(docstring)
if 'documentList' in paramDescriptions:
# Convert DocumentReferenceList to string list for database serialization
# Action methods will convert it back to DocumentReferenceList when needed
parameters['documentList'] = docList.to_string_list()
logger.info(f"Added documentList to parameters: {len(docList.references)} references")
elif 'documentList' not in parameters and isinstance(selection, dict) and 'parameters' in selection:
# Fallback: if documentList is already in selection['parameters'] as a list, preserve it
# This handles guided actions where documentList is already in the right format
docListParam = selection['parameters'].get('documentList')
if docListParam and isinstance(docListParam, list):
parameters['documentList'] = docListParam
logger.info(f"Preserved documentList from selection parameters: {len(docListParam)} references")
# Use connectionReference from selection (required)
connectionRef = selection.get('connectionReference')
if connectionRef:
# Check if action actually has connectionReference parameter
methodName, actionName = compoundActionName.split('.', 1)
from modules.workflows.processing.shared.methodDiscovery import methods as _methods
if methodName in _methods:
methodInstance = _methods[methodName]['instance']
if actionName in methodInstance.actions:
action_info = methodInstance.actions[actionName]
docstring = action_info.get('description', '')
# Extract parameter names from docstring to check if connectionReference exists
paramDescriptions, _ = methodInstance._extractParameterDetails(docstring)
if 'connectionReference' in paramDescriptions:
parameters['connectionReference'] = connectionRef
logger.info(f"Added connectionReference to parameters: {connectionRef}")
        except Exception as e:
            logger.warning(f"Error merging Stage 1 resources into Stage 2 parameters: {e}")
# Apply minimal defaults in-code (language)
if 'language' not in parameters and hasattr(self.services, 'user') and getattr(self.services.user, 'language', None):
parameters['language'] = self.services.user.language
# Build a synthetic ActionItem for execution routing and labels
currentRound = getattr(self.services.workflow, 'currentRound', 0)
currentTask = getattr(self.services.workflow, 'currentTask', 0)
resultLabel = f"round{currentRound}_task{currentTask}_action{stepIndex}_results"
        # User message is generated by AI in the action selection/parameters prompt
        # Extract from selection if available (Stage 2 may have overridden Stage 1's message above)
        userMessage = selection.get('userMessage') if isinstance(selection, dict) else None
taskAction = self._createActionItem({
"execMethod": methodName,
"execAction": actionName,
"execParameters": parameters,
"execResultLabel": resultLabel,
"status": TaskStatus.PENDING,
"userMessage": userMessage # User message from AI prompt (if provided)
})
# Execute using existing single action flow (message creation is handled internally)
result = await self.actionExecutor.executeSingleAction(taskAction, workflow, taskStep)
return result
def _observeBuild(self, actionResult: ActionResult) -> Observation:
"""Observe: build compact observation object from ActionResult with full document metadata"""
previews = []
notes = []
if actionResult and actionResult.documents:
# Process all documents and show full metadata
for doc in actionResult.documents:
# Extract all available metadata without content
name = getattr(doc, 'fileName', None) or getattr(doc, 'documentName', 'Unknown')
mimeType = getattr(doc, 'mimeType', None)
size = getattr(doc, 'size', None)
created = getattr(doc, 'created', None)
modified = getattr(doc, 'modified', None)
typeGroup = getattr(doc, 'typeGroup', None)
documentId = getattr(doc, 'documentId', None)
reference = getattr(doc, 'reference', None)
# Add content size indicator instead of actual content
contentSize = None
if hasattr(doc, 'documentData') and doc.documentData:
if isinstance(doc.documentData, dict) and 'content' in doc.documentData:
contentLength = len(str(doc.documentData['content']))
contentSize = f"{contentLength} characters"
else:
contentLength = len(str(doc.documentData))
contentSize = f"{contentLength} characters"
# Create ObservationPreview with only non-None values
preview = ObservationPreview(
name=name if name != 'Unknown' else 'Unknown Document',
mimeType=mimeType if mimeType and mimeType != 'Unknown' else None,
size=str(size) if size and size != 'Unknown' else None,
created=str(created) if created and created != 'Unknown' else None,
modified=str(modified) if modified and modified != 'Unknown' else None,
typeGroup=str(typeGroup) if typeGroup and typeGroup != 'Unknown' else None,
documentId=str(documentId) if documentId and documentId != 'Unknown' else None,
reference=str(reference) if reference and reference != 'Unknown' else None,
contentSize=contentSize
)
previews.append(preview)
# Extract comment if available
if hasattr(doc, 'documentData') and doc.documentData:
data = getattr(doc, 'documentData', None)
if isinstance(data, dict):
comment = data.get("comment", "")
if comment:
notes.append(f"Document '{name}': {comment}")
# Build observation with optional content analysis
contentAnalysis = None
if self.currentIntent and actionResult and actionResult.documents:
contentAnalysis = self._analyzeContent(actionResult.documents)
observation = Observation(
success=bool(actionResult.success) if actionResult else False,
            resultLabel=(actionResult.resultLabel or "") if actionResult else "",
documentsCount=len(actionResult.documents) if actionResult and actionResult.documents else 0,
previews=previews,
notes=notes,
contentAnalysis=contentAnalysis
)
return observation
def _analyzeContent(self, documents: List[Any]) -> Dict[str, Any]:
"""Analyzes content of documents for adaptive learning"""
try:
if not documents:
return {"contentType": "none", "contentSnippet": "", "intentMatch": False}
# Extract content from first document
firstDoc = documents[0]
content = ""
if hasattr(firstDoc, 'documentData'):
data = firstDoc.documentData
if isinstance(data, dict) and 'content' in data:
content = str(data['content'])
else:
content = str(data)
# Classify content type
contentType = self._classifyContent(content)
# Create content snippet
contentSnippet = content[:200] + "..." if len(content) > 200 else content
# Assess intent match
intentMatch = self._assessIntentMatch(content, self.currentIntent)
return {
"contentType": contentType,
"contentSnippet": contentSnippet,
"intentMatch": intentMatch
}
except Exception as e:
logger.error(f"Error analyzing content: {str(e)}")
return {"contentType": "error", "contentSnippet": "", "intentMatch": False}
def _classifyContent(self, content: str) -> str:
"""Classifies the type of content"""
if not content:
return "empty"
# Check for code
codeIndicators = ['def ', 'function', 'import ', 'class ', 'for ', 'while ', 'if ']
if any(indicator in content.lower() for indicator in codeIndicators):
return "code"
# Check for numbers
if re.search(r'\b\d+\b', content):
return "numbers"
# Check for structured content
if any(indicator in content for indicator in ['\n', '\t', '|', '-', '*', '1.', '2.']):
return "structured"
# Default to text
return "text"
def _assessIntentMatch(self, content: str, intent: Dict[str, Any]) -> bool:
"""Assesses if content matches the user intent"""
if not intent:
return False
dataType = intent.get("dataType", "unknown")
if dataType == "numbers":
# Check if content contains actual numbers, not code
hasNumbers = bool(re.search(r'\b\d+\b', content))
isNotCode = not any(keyword in content.lower() for keyword in ['def ', 'function', 'import '])
return hasNumbers and isNotCode
elif dataType == "text":
# Check if content is readable text
words = re.findall(r'\b\w+\b', content)
return len(words) > 5
elif dataType == "documents":
# Check if content is suitable for document creation
hasStructure = any(indicator in content for indicator in ['\n', '\t', '|', '-', '*'])
hasContent = len(content.strip()) > 50
return hasStructure and hasContent
return True # Default to match for unknown types
def _collectFeedback(self, result: Any, validation: Dict[str, Any], taskIntent: Dict[str, Any]) -> Dict[str, Any]:
"""Collects comprehensive feedback from action execution"""
try:
# Extract content summary
contentDelivered = ""
if result.documents:
firstDoc = result.documents[0]
if hasattr(firstDoc, 'documentData'):
data = firstDoc.documentData
if isinstance(data, dict) and 'content' in data:
content = str(data['content'])
contentDelivered = content[:100] + "..." if len(content) > 100 else content
else:
contentDelivered = str(data)[:100] + "..." if len(str(data)) > 100 else str(data)
return {
"actionAttempted": result.resultLabel or "unknown",
"parametersUsed": {}, # Would be extracted from action context
"contentDelivered": contentDelivered,
"intentMatchScore": validation.get('qualityScore', 0),
"qualityScore": validation.get('qualityScore', 0),
"issuesFound": validation.get('improvementSuggestions', []),
"learningOpportunities": validation.get('improvementSuggestions', []),
"userSatisfaction": None, # Would be collected from user feedback
"timestamp": datetime.now(timezone.utc).timestamp()
}
except Exception as e:
logger.error(f"Error collecting feedback: {str(e)}")
return {
"actionAttempted": "unknown",
"parametersUsed": {},
"contentDelivered": "",
"intentMatchScore": 0,
"qualityScore": 0,
"issuesFound": [],
"learningOpportunities": [],
"userSatisfaction": None,
"timestamp": datetime.now(timezone.utc).timestamp()
}
async def _refineDecide(self, context: TaskContext, observation: Observation) -> ReviewResult:
"""Refine: decide continue or stop, with reason"""
# Create proper ReviewContext for extractReviewContent
from modules.datamodels.datamodelChat import ReviewContext
# Convert observation to dict for extractReviewContent (temporary compatibility)
observationDict = {
'success': observation.success,
'resultLabel': observation.resultLabel,
'documentsCount': observation.documentsCount,
'previews': [p.model_dump(exclude_none=True) if hasattr(p, 'model_dump') else p.dict() for p in observation.previews] if observation.previews else [],
'notes': observation.notes,
'contentValidation': observation.contentValidation if observation.contentValidation else {},
'contentAnalysis': observation.contentAnalysis if observation.contentAnalysis else {}
}
reviewContext = ReviewContext(
taskStep=context.taskStep,
taskActions=[],
actionResults=[], # Dynamic mode doesn't have action results in this context
stepResult={'observation': observationDict},
workflowId=context.workflowId,
previousResults=[]
)
baseReviewContent = extractReviewContent(reviewContext)
placeholders = {"REVIEW_CONTENT": baseReviewContent}
# NEW: Add content validation to review content
enhancedReviewContent = placeholders.get("REVIEW_CONTENT", "")
if observation.contentValidation:
validation = observation.contentValidation
enhancedReviewContent += f"\n\nCONTENT VALIDATION:\n"
enhancedReviewContent += f"Overall Success: {validation.get('overallSuccess', False)}\n"
quality_score = validation.get('qualityScore', 0.0)
if quality_score is None:
quality_score = 0.0
enhancedReviewContent += f"Quality Score: {quality_score:.2f}\n"
gap_analysis = validation.get('gapAnalysis', '')
if gap_analysis:
enhancedReviewContent += f"Gap Analysis: {gap_analysis}\n"
if validation.get('improvementSuggestions'):
enhancedReviewContent += f"Improvement Suggestions: {', '.join(validation['improvementSuggestions'])}\n"
# NEW: Add content analysis to review content
if observation.contentAnalysis:
analysis = observation.contentAnalysis
enhancedReviewContent += f"\nCONTENT ANALYSIS:\n"
enhancedReviewContent += f"Content Type: {analysis.get('contentType', 'unknown')}\n"
enhancedReviewContent += f"Intent Match: {analysis.get('intentMatch', False)}\n"
if analysis.get('contentSnippet'):
enhancedReviewContent += f"Content Preview: {analysis['contentSnippet']}\n"
# NEW: Add progress state to review content
progressState = self.progressTracker.getCurrentProgress()
enhancedReviewContent += f"\nPROGRESS STATE:\n"
enhancedReviewContent += f"Completed Objectives: {len(progressState['completedObjectives'])}\n"
enhancedReviewContent += f"Partial Achievements: {len(progressState['partialAchievements'])}\n"
enhancedReviewContent += f"Failed Attempts: {len(progressState['failedAttempts'])}\n"
enhancedReviewContent += f"Current Phase: {progressState['currentPhase']}\n"
if progressState['nextActionsSuggested']:
enhancedReviewContent += f"Next Action Suggestions: {', '.join(progressState['nextActionsSuggested'])}\n"
bundle = generateDynamicRefinementPrompt(self.services, context, enhancedReviewContent)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Centralized AI call for refinement decision (uses static planning parameters)
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
# Create options for documentation/consistency (currently not passed to callAiPlanning API)
options = AiCallOptions(
operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.05,
maxProcessingTime=30
)
resp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
placeholders=placeholders,
debugType="refinement"
)
        # Parse response using structured parsing with ReviewResult model (imported at module level)
        from modules.shared.jsonUtils import parseJsonWithModel
if not resp:
return ReviewResult(
status="continue",
reason="default",
qualityScore=5.0
)
try:
# Parse response string as ReviewResult (prompt now correctly asks for "status")
decision = parseJsonWithModel(resp, ReviewResult)
return decision
except ValueError as e:
logger.warning(f"Failed to parse ReviewResult from response: {e}. Using default.")
return ReviewResult(
status="continue",
reason="default",
qualityScore=5.0
)
async def _createDynamicActionMessage(self, workflow: ChatWorkflow, selection: Dict[str, Any],
step: int, maxSteps: int, taskIndex: int, messageType: str,
result: ActionResult = None, observation: Observation = None):
"""Create user-friendly messages for Dynamic workflow actions"""
        try:
            # 'action' is a compound "method.action" string (see _planSelect)
            compoundActionName = selection.get('action', '')
            method, _, actionName = compoundActionName.partition('.')
# Get user language
userLanguage = self.services.user.language if self.services and self.services.user else 'en'
if messageType == "before":
# Message BEFORE action execution
userMessage = await self._generateActionIntentionMessage(method, actionName, userLanguage)
messageContent = f"🔄 **Step {step}**\n\n{userMessage}"
status = "step"
actionProgress = "pending"
documentsLabel = f"action_{step}_intention"
elif messageType == "after":
# Message AFTER action execution
userMessage = await self._generateActionResultMessage(method, actionName, result, observation, userLanguage)
successIcon = "" if result and result.success else ""
messageContent = f"{successIcon} **Step {step} Complete**\n\n{userMessage}"
status = "step"
actionProgress = "success" if result and result.success else "fail"
documentsLabel = observation.resultLabel if observation else f"action_{step}_result"
else:
return
# Create workflow message
messageData = {
"workflowId": workflow.id,
"role": "assistant",
"message": messageContent,
"status": status,
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.timestampGetUtc(),
"documentsLabel": documentsLabel,
"documents": [],
"roundNumber": workflow.currentRound,
"taskNumber": taskIndex,
"actionNumber": step,
"actionProgress": actionProgress
}
self.services.chat.storeMessageWithDocuments(workflow, messageData, [])
except Exception as e:
logger.error(f"Error creating Dynamic action message: {str(e)}")
async def _generateActionIntentionMessage(self, method: str, actionName: str, userLanguage: str):
"""Generate user-friendly message explaining what action will do"""
try:
# Create a simple AI prompt to generate user-friendly action descriptions
prompt = f"""Generate a brief, user-friendly message explaining what the {method}.{actionName} action will do.
User language: {userLanguage}
Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly message
response = await self.services.ai.callAiPlanning(
prompt=prompt,
placeholders=None,
debugType="userfriendlymessage"
)
return response.strip() if response else f"Executing {method}.{actionName} action..."
except Exception as e:
logger.error(f"Error generating action intention message: {str(e)}")
return f"Executing {method}.{actionName} action..."
async def _generateActionResultMessage(self, method: str, actionName: str, result: ActionResult,
observation: Observation, userLanguage: str):
"""Generate user-friendly message explaining action results"""
try:
# Build result context
resultContext = ""
if result and result.documents:
docCount = len(result.documents)
resultContext = f"Generated {docCount} document(s)"
elif observation and observation.documentsCount > 0:
docCount = observation.documentsCount
resultContext = f"Generated {docCount} document(s)"
# Create AI prompt for result message
prompt = f"""Generate a brief, user-friendly message explaining the result of the {method}.{actionName} action.
User language: {userLanguage}
Success: {result.success if result else 'Unknown'}
Result context: {resultContext}
Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly result message
response = await self.services.ai.callAiPlanning(
prompt=prompt,
placeholders=None,
debugType="userfriendlyresult"
)
return response.strip() if response else f"{method}.{actionName} action completed"
except Exception as e:
logger.error(f"Error generating action result message: {str(e)}")
return f"{method}.{actionName} action completed"
def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
"""Creates a new task action for Dynamic mode"""
try:
import uuid
# Ensure ID is present
if "id" not in actionData or not actionData["id"]:
actionData["id"] = f"action_{uuid.uuid4()}"
# Ensure required fields
if "status" not in actionData:
actionData["status"] = TaskStatus.PENDING
if "execMethod" not in actionData:
logger.error("execMethod is required for task action")
return None
if "execAction" not in actionData:
logger.error("execAction is required for task action")
return None
if "execParameters" not in actionData:
actionData["execParameters"] = {}
# Use generic field separation based on ActionItem model
simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
# Create action in database
createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
# Convert to ActionItem model
return ActionItem(
id=createdAction["id"],
execMethod=createdAction["execMethod"],
execAction=createdAction["execAction"],
execParameters=createdAction.get("execParameters", {}),
execResultLabel=createdAction.get("execResultLabel"),
expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
status=createdAction.get("status", TaskStatus.PENDING),
error=createdAction.get("error"),
retryCount=createdAction.get("retryCount", 0),
retryMax=createdAction.get("retryMax", 3),
processingTime=createdAction.get("processingTime"),
timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
result=createdAction.get("result"),
resultDocuments=createdAction.get("resultDocuments", []),
userMessage=createdAction.get("userMessage")
)
except Exception as e:
logger.error(f"Error creating task action: {str(e)}")
return None
def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
"""Update workflow object before executing a task"""
try:
workflow = self.services.workflow
updateData = {
"currentTask": taskNumber,
"currentAction": 0,
"totalActions": 0
}
# Update workflow object
workflow.currentTask = taskNumber
workflow.currentAction = 0
workflow.totalActions = 0
# Update in database
self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}: {updateData}")
except Exception as e:
logger.error(f"Error updating workflow before executing task: {str(e)}")
def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
"""Update workflow object before executing an action"""
try:
workflow = self.services.workflow
updateData = {
"currentAction": actionNumber
}
# Update workflow object
workflow.currentAction = actionNumber
# Update in database
self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}: {updateData}")
except Exception as e:
logger.error(f"Error updating workflow before executing action: {str(e)}")